Python tensorflow.minimum Function Code Examples


This article collects typical usage examples of Python's tensorflow.minimum function. If you have been wondering what exactly tf.minimum does, or how to use it in practice, the curated code examples below should help.



Twenty code examples of the minimum function are shown below, sorted by popularity by default.
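Before diving in, here is a minimal sketch of the basic call itself (assuming TensorFlow 2.x eager execution; under TF 1.x the same op exists, but the tensors must be evaluated in a session):

import tensorflow as tf

a = tf.constant([1.0, 5.0, 3.0])
b = tf.constant([4.0, 2.0, 3.0])

# tf.minimum is element-wise and follows the usual broadcasting rules.
print(tf.minimum(a, b))    # [1.0, 2.0, 3.0]
print(tf.minimum(a, 2.5))  # scalar broadcast: [1.0, 2.5, 2.5]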

Example 1: IoU

def IoU(bbox, gt):
    # bbox = [x, y, w, h]  (x, y is the top-left corner)
    shape = [-1, 1]

    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 2] + gt[:, 0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 3] + gt[:, 1], tf.float32), shape))

    # intersection width/height (tf.sub and tf.mul are the pre-1.0 names of
    # tf.subtract and tf.multiply)
    inter_w = tf.sub(x2, x1)
    inter_h = tf.sub(y2, y1)
    inter = tf.cast(inter_w * inter_h, tf.float32)

    bounding_box = tf.cast(tf.mul(bbox[2], bbox[3]), tf.float32)
    ground_truth = tf.reshape(tf.cast(tf.mul(gt[:, 2], gt[:, 3]), tf.float32), shape)

    # iou = tf.div(inter, tf.sub(tf.add(bounding_box, tf.reshape(ground_truth, shape)), inter))
    iou = inter / (bounding_box + ground_truth - inter)

    # zero out negative IoU values to keep the result in [0, 1]
    mask_less = tf.cast(tf.logical_not(tf.less(iou, tf.zeros_like(iou))), tf.float32)
    # mask_great = tf.cast(tf.logical_not(tf.greater(iou, tf.ones_like(iou))), tf.float32)

    iou = tf.mul(iou, mask_less)
    # iou = tf.mul(iou, positive_mask)

    return iou
Developer: Johannes-brahms, Project: Yolo, Lines of code: 35, Source: utils.py
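A hypothetical call of the function above with made-up values (the snippet targets pre-1.0 TensorFlow, where tf.sub/tf.mul had not yet been renamed to tf.subtract/tf.multiply):

bbox = tf.constant([10.0, 10.0, 20.0, 20.0])  # [x, y, w, h]
gt = tf.constant([[12.0, 12.0, 20.0, 20.0],
                  [50.0, 50.0, 10.0, 10.0]])  # N x 4 ground-truth boxes
overlap = IoU(bbox, gt)                       # shape [N, 1], one IoU per gt box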


Example 2: compute_IOU

def compute_IOU(bboxA, bboxB):
    """Compute the Intersection Over Union.
    Args:
        bboxA: [N X 4 tensor] format = [left, top, right, bottom]
        bboxB: [N X 4 tensor] 

    Return:
        IOU: [N X 1 tensor]
    """

    x1A, y1A, x2A, y2A = tf.split(1, 4, bboxA)
    x1B, y1B, x2B, y2B = tf.split(1, 4, bboxB)

    # compute intersection
    x1_max = tf.maximum(x1A, x1B)
    y1_max = tf.maximum(y1A, y1B)
    x2_min = tf.minimum(x2A, x2B)
    y2_min = tf.minimum(y2A, y2B)

    # overlap_flag = tf.logical_and( tf.less(x1_max, x2_min), tf.less(y1_max, y2_min))

    overlap_flag = tf.to_float(tf.less(x1_max, x2_min)) * \
        tf.to_float(tf.less(y1_max, y2_min))

    overlap_area = tf.mul(overlap_flag, tf.mul(
        x2_min - x1_max, y2_min - y1_max))

    # compute union
    areaA = tf.mul(x2A - x1A, y2A - y1A)
    areaB = tf.mul(x2B - x1B, y2B - y1B)
    union_area = areaA + areaB - overlap_area

    return tf.div(overlap_area, union_area)
Developer: renmengye, Project: deep-tracker, Lines of code: 33, Source: build_deep_tracker.py
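A hedged usage sketch with assumed box values (note that `tf.split(1, 4, ...)` uses the old pre-1.0 argument order of `(axis, num_splits, value)`):

bboxA = tf.constant([[0.0, 0.0, 10.0, 10.0]])  # [left, top, right, bottom]
bboxB = tf.constant([[5.0, 5.0, 15.0, 15.0]])
iou = compute_IOU(bboxA, bboxB)  # intersection 25, union 175 -> ~0.143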


Example 3: sample_from_discretized_mix_logistic

def sample_from_discretized_mix_logistic(l, nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
        logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
    log_scales = tf.maximum(tf.reduce_sum(
        l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(
        l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(
        x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(
        x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
    return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
Developer: aliha, Project: pixel-cnn, Lines of code: 26, Source: nn.py
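The repeated `tf.minimum(tf.maximum(x, -1.), 1.)` construction in this example is the standard clamp-to-interval pattern. A standalone sketch of what it does to out-of-range values:

x = tf.constant([-1.7, 0.3, 2.4])
clipped = tf.minimum(tf.maximum(x, -1.0), 1.0)  # [-1.0, 0.3, 1.0]
# tf.clip_by_value(x, -1.0, 1.0) is the equivalent built-in.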


Example 4: bboxes_clip

def bboxes_clip(bbox_ref, bboxes, scope=None):
    """Clip bounding boxes to a reference box.
    Batch-compatible if the first dimensions of `bbox_ref` and `bboxes`
    can be broadcast together.

    Args:
      bbox_ref: Reference bounding box. Nx4 or 4 shaped-Tensor;
      bboxes: Bounding boxes to clip. Nx4 or 4 shaped-Tensor or dictionary.
    Return:
      Clipped bboxes.
    """
    # Bboxes is dictionary.
    if isinstance(bboxes, dict):
        with tf.name_scope(scope, 'bboxes_clip_dict'):
            d_bboxes = {}
            for c in bboxes.keys():
                d_bboxes[c] = bboxes_clip(bbox_ref, bboxes[c])
            return d_bboxes

    # Tensors inputs.
    with tf.name_scope(scope, 'bboxes_clip'):
        # Easier with transposed bboxes. Especially for broadcasting.
        bbox_ref = tf.transpose(bbox_ref)
        bboxes = tf.transpose(bboxes)
        # Intersection bboxes and reference bbox.
        ymin = tf.maximum(bboxes[0], bbox_ref[0])
        xmin = tf.maximum(bboxes[1], bbox_ref[1])
        ymax = tf.minimum(bboxes[2], bbox_ref[2])
        xmax = tf.minimum(bboxes[3], bbox_ref[3])
        # Double check! Empty boxes when no-intersection.
        ymin = tf.minimum(ymin, ymax)
        xmin = tf.minimum(xmin, xmax)
        bboxes = tf.transpose(tf.stack([ymin, xmin, ymax, xmax], axis=0))
        return bboxes
Developer: bowrian, Project: SSD-Tensorflow, Lines of code: 34, Source: bboxes.py
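A hypothetical call with assumed relative coordinates, using the `[ymin, xmin, ymax, xmax]` layout this codebase works with:

bbox_ref = tf.constant([0.0, 0.0, 1.0, 1.0])  # clip to the unit box
bboxes = tf.constant([[-0.1, 0.2, 0.5, 1.3],
                      [0.4, 0.4, 0.9, 0.8]])
clipped = bboxes_clip(bbox_ref, bboxes)
# first row becomes [0.0, 0.2, 0.5, 1.0]; the second is already inside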


Example 5: make_minibatch

    def make_minibatch(self, valid_anchors):
        with tf.variable_scope('rpn_minibatch'):

            # in labels(shape is [N, ]): 1 is positive, 0 is negative, -1 is ignored
            labels, anchor_matched_gtboxes, object_mask = \
                self.rpn_find_positive_negative_samples(valid_anchors)  # [num_of_valid_anchors, ]

            positive_indices = tf.reshape(tf.where(tf.equal(labels, 1.0)), [-1])  # using labels here is equivalent to using object_mask

            num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                          tf.cast(self.rpn_mini_batch_size * self.rpn_positives_ratio, tf.int32))

            # num of positives <= minibatch_size * 0.5
            positive_indices = tf.random_shuffle(positive_indices)
            positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])
            # positive_anchors = tf.gather(self.anchors, positive_indices)

            negative_indices = tf.reshape(tf.where(tf.equal(labels, 0.0)), [-1])
            num_of_negatives = tf.minimum(self.rpn_mini_batch_size - num_of_positives,
                                          tf.shape(negative_indices)[0])

            negative_indices = tf.random_shuffle(negative_indices)
            negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])
            # negative_anchors = tf.gather(self.anchors, negative_indices)

            minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
            minibatch_indices = tf.random_shuffle(minibatch_indices)

            minibatch_anchor_matched_gtboxes = tf.gather(anchor_matched_gtboxes, minibatch_indices)
            object_mask = tf.gather(object_mask, minibatch_indices)
            labels = tf.cast(tf.gather(labels, minibatch_indices), tf.int32)
            labels_one_hot = tf.one_hot(labels, depth=2)
            return minibatch_indices, minibatch_anchor_matched_gtboxes, object_mask, labels_one_hot
Developer: mbossX, Project: RRPN_FPN_Tensorflow, Lines of code: 33, Source: build_rpn.py


Example 6: bboxes_intersection

def bboxes_intersection(bbox_ref, bboxes, name=None):
    """Compute relative intersection between a reference box and a
    collection of bounding boxes. Namely, compute the quotient between
    intersection area and box area.

    Args:
      bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
      bboxes: (N, 4) Tensor, collection of bounding boxes.
    Return:
      (N,) Tensor with relative intersection.
    """
    with tf.name_scope(name, 'bboxes_intersection'):
        # Should be more efficient to first transpose.
        bboxes = tf.transpose(bboxes)
        bbox_ref = tf.transpose(bbox_ref)
        # Intersection bbox and volume.
        int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        bboxes_vol = (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1])
        scores = tfe_math.safe_divide(inter_vol, bboxes_vol, 'intersection')
        return scores
Developer: bowrian, Project: SSD-Tensorflow, Lines of code: 27, Source: bboxes.py


Example 7: bboxes_jaccard

def bboxes_jaccard(bbox_ref, bboxes, name=None):
    """Compute jaccard score between a reference box and a collection
    of bounding boxes.

    Args:
      bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
      bboxes: (N, 4) Tensor, collection of bounding boxes.
    Return:
      (N,) Tensor with Jaccard scores.
    """
    with tf.name_scope(name, 'bboxes_jaccard'):
        # Should be more efficient to first transpose.
        bboxes = tf.transpose(bboxes)
        bbox_ref = tf.transpose(bbox_ref)
        # Intersection bbox and volume.
        int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        union_vol = -inter_vol \
            + (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1]) \
            + (bbox_ref[2] - bbox_ref[0]) * (bbox_ref[3] - bbox_ref[1])
        jaccard = tfe_math.safe_divide(inter_vol, union_vol, 'jaccard')
        return jaccard
Developer: bowrian, Project: SSD-Tensorflow, Lines of code: 28, Source: bboxes.py
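Examples 6 and 7 differ only in the denominator: `bboxes_intersection` normalizes the overlap by each box's own area, while `bboxes_jaccard` normalizes by the union. A numeric check with assumed values:

bbox_ref = tf.constant([0.0, 0.0, 1.0, 1.0])
bboxes = tf.constant([[0.5, 0.5, 1.5, 1.5]])  # a quarter of it overlaps the reference
# intersection area = 0.25, box area = 1.0, union area = 1.75
bboxes_intersection(bbox_ref, bboxes)  # -> [0.25]
bboxes_jaccard(bbox_ref, bboxes)       # -> [~0.143]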


Example 8: disjunction_of_literals

def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1,list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0-tf.reduce_prod(1.0-literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print "data aggregator is lukas"
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print "data aggregator is mean"
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print "data aggregator is hmean"
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print "data aggregator is min"
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print "data aggregator is qmean"
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print "data aggregator is cmean"
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
Developer: ivanDonadello, Project: knowPic, Lines of code: 33, Source: logictensornetworks.py
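For reference, the two branches above that call tf.minimum implement (n-ary generalizations of) the standard bounded t-conorms from fuzzy logic:

$x \oplus_{\text{Luk}} y = \min(1,\; x + y)$
$x \oplus_{\text{Yager},\,p=2} y = \min\!\left(1,\; \sqrt{x^2 + y^2}\right)$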


Example 9: IoULoss

 def IoULoss(self, pd, gt):
     mask = tf.cast(
         tf.greater(tf.reduce_sum(
             tf.cast(tf.greater(gt, 0), tf.int8), 3), 3),
         tf.float32
     )
     npd = tf.transpose(pd, [3, 0, 1, 2])
     ngt = tf.transpose(gt, [3, 0, 1, 2])
     area_x = tf.mul(
         tf.add(tf.gather(npd, 0), tf.gather(npd, 2)),
         tf.add(tf.gather(npd, 1), tf.gather(npd, 3)),
     )
     area_g = tf.mul(
         tf.add(tf.gather(ngt, 0), tf.gather(ngt, 2)),
         tf.add(tf.gather(ngt, 1), tf.gather(ngt, 3)),
     )
     w_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
         tf.minimum(tf.gather(npd, 0), tf.gather(ngt, 0)),
         tf.minimum(tf.gather(npd, 2), tf.gather(ngt, 2)),
     ))
     h_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
         tf.minimum(tf.gather(npd, 1), tf.gather(ngt, 1)),
         tf.minimum(tf.gather(npd, 3), tf.gather(ngt, 3)),
     ))
     area_overlap = tf.mul(w_overlap, h_overlap)
     area_u = tf.sub(tf.add(area_x, area_g), area_overlap)
     iou = tf.div(area_overlap, tf.add(area_u, tf.constant(1, tf.float32)))
     iou = tf.maximum(iou, tf.constant(1e-4, tf.float32))
     cost = -tf.log(iou)
     cost = tf.mul(cost, mask)
     cost = tf.reduce_sum(cost)
     return cost
Developer: hewr1993, Project: nn_expr, Lines of code: 32, Source: run.py


Example 10: batch_iou

def batch_iou(bboxes, bbox):
  """Compute iou of a batch of boxes with another box. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    bboxes: A batch of boxes. 2-D with shape `[B, 4]`.
    bbox: A single box. 1-D with shape `[4]`.

  Returns:
    Batch of IOUs
  """
  lr = tf.maximum(
    tf.minimum(bboxes[:, 3], bbox[3]) -
    tf.maximum(bboxes[:, 1], bbox[1]),
    0
  )
  tb = tf.maximum(
    tf.minimum(bboxes[:, 2], bbox[2]) -
    tf.maximum(bboxes[:, 0], bbox[0]),
    0
  )
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bboxes[:, 3] - bboxes[:, 1]), (bboxes[:, 2] - bboxes[:, 0])) +
    tf.multiply((bbox[3] - bbox[1]), (bbox[2] - bbox[0])),
    intersection
  )
  iou = tf.div(intersection, union)
  return iou
Developer: tigercut, Project: MobileNet, Lines of code: 27, Source: det_utils.py
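A hypothetical call with assumed values in the `[y_min, x_min, y_max, x_max]` format the docstring specifies:

bboxes = tf.constant([[0.0, 0.0, 2.0, 2.0],
                      [0.0, 0.0, 1.0, 1.0]])
bbox = tf.constant([0.0, 0.0, 2.0, 2.0])
batch_iou(bboxes, bbox)  # -> [1.0, 0.25]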


Example 11: get_next_input

def get_next_input(output):
    # the next location is computed by the location network
    baseline = tf.sigmoid(tf.matmul(output,Wb_h_b) + Bb_h_b)
    baselines.append(baseline)
    # compute the next location, then impose noise
    if eyeCentered:
        # add the last sampled glimpse location
        # TODO max(-1, min(1, u + N(output, sigma) + prevLoc))
        mean_loc = tf.maximum(-1.0, tf.minimum(1.0, tf.matmul(output, Wl_h_l) + sampled_locs[-1] ))
    else:
        mean_loc = tf.matmul(output, Wl_h_l)

    # mean_loc = tf.stop_gradient(mean_loc)
    mean_locs.append(mean_loc)
    mean_locs_stopGrad.append(tf.stop_gradient(mean_loc))

    # add noise
    # sample_loc = tf.tanh(mean_loc + tf.random_normal(mean_loc.get_shape(), 0, loc_sd))
    sample_loc = tf.maximum(-1.0, tf.minimum(1.0, mean_loc + tf.random_normal(mean_loc.get_shape(), 0, loc_sd)))

    # don't propagate gradients through the locations
    # sample_loc = tf.stop_gradient(sample_loc)
    sampled_locs.append(sample_loc)
    sampled_locs_stopGrad.append(tf.stop_gradient(sample_loc))

    return get_glimpse(sample_loc)
Developer: QihongL, Project: RAM, Lines of code: 26, Source: ram.py


Example 12: _update_lipschitz

  def _update_lipschitz(self,v,i):
    config = self.config
    if len(v.shape) > 1:
      k = self.config.weight_constraint_k or 100.0000
      wi_hat = v
      if len(v.shape) == 4:
        #fij = tf.reduce_sum(tf.abs(wi_hat),  axis=[0,1])
        fij = wi_hat
        fij = tf.reduce_sum(tf.abs(fij),  axis=[1])
        fij = tf.reduce_max(fij,  axis=[0])
      else:
        fij = wi_hat

      if self.config.ortho_pnorm == "inf":
        wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=0), axis=0)
      else:
        # conv
        wp = tf.reduce_max(tf.reduce_sum(tf.abs(fij), axis=1), axis=0)
      ratio = (1.0/tf.maximum(1.0, wp/k))
      
      if self.config.weight_bounce:
        bounce = tf.minimum(1.0, tf.ceil(wp/k-0.999))
        ratio -= tf.maximum(0.0, bounce) * 0.2

      if self.config.weight_scaleup:
        up = tf.minimum(1.0, tf.ceil(0.02-wp/k))
        ratio += tf.maximum(0.0, up) * k/wp * 0.2

      wi = ratio*(wi_hat)
      #self.gan.metrics['wi'+str(i)]=wp
      #self.gan.metrics['wk'+str(i)]=ratio
      #self.gan.metrics['bouce'+str(i)]=bounce
      return tf.assign(v, wi)
    return None
Developer: 255BITS, Project: hyperchamber-gan, Lines of code: 34, Source: weight_constraint_train_hook.py


Example 13: loss

def loss(y_true_cls, y_pred_cls,
         y_true_geo, y_pred_geo,
         training_mask):
    '''
    define the loss used for training, containing two parts:
    the first part uses dice loss instead of weighted log-loss,
    the second part is the iou loss defined in the paper
    :param y_true_cls: ground truth of text
    :param y_pred_cls: prediction of text
    :param y_true_geo: ground truth of geometry
    :param y_pred_geo: prediction of geometry
    :param training_mask: mask used in training, to ignore some text annotated by ###
    :return:
    '''
    classification_loss = dice_coefficient(y_true_cls, y_pred_cls, training_mask)
    # scale classification loss to match the iou loss part
    classification_loss *= 0.01

    # d1 -> top, d2->right, d3->bottom, d4->left
    d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = tf.split(value=y_true_geo, num_or_size_splits=5, axis=3)
    d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = tf.split(value=y_pred_geo, num_or_size_splits=5, axis=3)
    area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
    area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)
    w_union = tf.minimum(d2_gt, d2_pred) + tf.minimum(d4_gt, d4_pred)
    h_union = tf.minimum(d1_gt, d1_pred) + tf.minimum(d3_gt, d3_pred)
    area_intersect = w_union * h_union
    area_union = area_gt + area_pred - area_intersect
    L_AABB = -tf.log((area_intersect + 1.0)/(area_union + 1.0))
    L_theta = 1 - tf.cos(theta_pred - theta_gt)
    tf.summary.scalar('geometry_AABB', tf.reduce_mean(L_AABB * y_true_cls * training_mask))
    tf.summary.scalar('geometry_theta', tf.reduce_mean(L_theta * y_true_cls * training_mask))
    L_g = L_AABB + 20 * L_theta

    return tf.reduce_mean(L_g * y_true_cls * training_mask) + classification_loss
Developer: ausk, Project: EAST_ICPR, Lines of code: 34, Source: model.py


Example 14: sum_ohem_loss

def sum_ohem_loss(cls_score, label, bbox_pred, bbox_targets,
                  bbox_inside_weights, bbox_outside_weights,
                  nr_ohem_sampling, sigma=1.0, dim=[1]):
    cls_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=cls_score, labels=label)
    box_loss_base = _smooth_l1_loss_base(bbox_pred, bbox_targets,
                                         bbox_inside_weights,
                                         bbox_outside_weights,
                                         sigma=sigma, dim=[1])

    box_loss = tf.reduce_sum(box_loss_base, axis=dim)
    cls_box_loss = cls_loss + box_loss

    nr_ohem_sampling = tf.minimum(nr_ohem_sampling,
                                  tf.shape(cls_box_loss)[0])

    topk_val, topk_idx = tf.nn.top_k(cls_box_loss, k=nr_ohem_sampling,
                                     sorted=True, name='ohem_loss_index')

    cls_loss_ohem = tf.gather(cls_loss, topk_idx, name='ohem_cls_loss')
    box_loss_ohem = tf.gather(box_loss, topk_idx, name='ohem_box_loss')

    box_loss_ohem = tf.reduce_sum(box_loss_ohem) / \
                    tf.to_float(nr_ohem_sampling)
    cls_norm = tf.stop_gradient(tf.minimum(nr_ohem_sampling,
                                           tf.shape(topk_val)[0]))

    # db_cls_norm = tf.py_func(debug_single, [cls_loss, box_loss, topk_idx, 
    # cls_loss_ohem, box_loss_ohem, cls_norm], [tf.bool])
    # with tf.control_dependencies(db_cls_norm):
    cls_loss_ohem = tf.reduce_sum(cls_loss_ohem) / tf.to_float(cls_norm)

    return cls_loss_ohem, box_loss_ohem
Developer: Zumbalamambo, Project: light_head_rcnn, Lines of code: 33, Source: loss_opr.py


Example 15: transformer_policy

def transformer_policy(global_step, learning_rate, d_model, warmup_steps,
                       max_lr=None, coefficient=1.0, dtype=tf.float32):
  """Transformer's learning rate policy from https://arxiv.org/pdf/1706.03762.pdf
  with a hat (max_lr) (also called "noam" learning rate decay scheme).

  Args:
    global_step: global step TensorFlow tensor (ignored for this policy).
    learning_rate (float): initial learning rate to use.
    d_model (int): model dimensionality.
    warmup_steps (int): number of warm-up steps.
    max_lr (float): maximal learning rate, i.e. hat.
    coefficient (float): optimizer adjustment.
        Recommended 0.002 if using "Adam" else 1.0.
    dtype: dtype for this policy.

  Returns:
    learning rate at step ``global_step``.
  """
  step_num = tf.cast(global_step, dtype=dtype)
  ws = tf.cast(warmup_steps, dtype=dtype)

  decay = coefficient * d_model ** -0.5 * tf.minimum(
    (step_num + 1) * ws ** -1.5, (step_num + 1) ** -0.5)

  new_lr = decay * learning_rate
  if max_lr is not None:
    return tf.minimum(max_lr, new_lr)
  else:
    return new_lr
Developer: fotwo, Project: OpenSeq2Seq, Lines of code: 29, Source: lr_policies.py
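Here `tf.minimum` selects whichever branch is smaller, so the rate grows linearly during warm-up and decays as an inverse square root afterwards; the two branches meet exactly at `step_num + 1 == warmup_steps`. A plain-Python numeric check with assumed hyperparameters:

d_model, warmup = 512, 4000

def noam(step, learning_rate=1.0, coefficient=1.0):
    decay = coefficient * d_model ** -0.5 * min((step + 1) * warmup ** -1.5,
                                                (step + 1) ** -0.5)
    return decay * learning_rate

noam(0)      # ~1.75e-07: tiny at the start of warm-up
noam(3999)   # ~6.99e-04: the peak, where both branches coincide
noam(40000)  # ~2.21e-04: inverse-square-root decay afterwards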


Example 16: copy_net_logit_function

                def copy_net_logit_function(state):
                    state = tf.nn.dropout(state, self.dropout_placeholder)

                    # the logits for generating the next word are computed in
                    # the standard way
                    generate_logits = tf.matmul(state, decoding_w) + decoding_b

                    # Equation 8 in the paper ... in shape of source sentence
                    # (batch x time)
                    copy_logits_in_time = tf.reduce_sum(
                        projected_inputs * tf.expand_dims(state, 1), [2])

                    # mask out the padding in exponential domain
                    copy_logits_in_time_exp_masked = tf.exp(
                        tf.minimum([[80.0]], copy_logits_in_time)) * copy_mask

                    #  ... in shape of vocabulary (batch x time x vocabulary)
                    copy_logits_in_vocabulary = tf.expand_dims(
                        copy_logits_in_time_exp_masked,
                        2) * vocabulary_shaped_indices

                    # Equation 6 without normalization
                    copy_logits_exp = tf.reduce_sum(copy_logits_in_vocabulary,
                                                    [1])

                    logits_exp = copy_logits_exp \
                                 + tf.exp(tf.minimum([[80.0]], generate_logits))

                    return (tf.log(tf.maximum([[1e-40]], logits_exp)),
                            copy_logits_in_time)
Developer: alvaz16, Project: neuralmonkey, Lines of code: 30, Source: decoder.py
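The `tf.minimum([[80.0]], ...)` calls above cap the logits before exponentiation so that `tf.exp` cannot overflow float32, whose largest finite value is roughly exp(88.7). A standalone sketch of the pattern with assumed values:

logits = tf.constant([[10.0, 95.0]])
safe = tf.exp(tf.minimum(80.0, logits))  # capped at exp(80.0), stays finite
naive = tf.exp(logits)                   # exp(95.0) overflows to inf in float32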


Example 17: fast_rcnn_minibatch

    def fast_rcnn_minibatch(self, reference_boxes):
        with tf.variable_scope('fast_rcnn_minibatch'):

            reference_boxes_mattached_gtboxes, object_mask, label = \
                self.fast_rcnn_find_positive_negative_samples(reference_boxes)

            positive_indices = tf.reshape(tf.where(tf.not_equal(object_mask, 0.)), [-1])

            num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                          tf.cast(self.fast_rcnn_minibatch_size*self.fast_rcnn_positives_ratio, tf.int32))

            positive_indices = tf.random_shuffle(positive_indices)
            positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])

            negative_indices = tf.reshape(tf.where(tf.equal(object_mask, 0.)), [-1])
            num_of_negatives = tf.minimum(tf.shape(negative_indices)[0],
                                          self.fast_rcnn_minibatch_size - num_of_positives)

            negative_indices = tf.random_shuffle(negative_indices)
            negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])

            minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
            minibatch_indices = tf.random_shuffle(minibatch_indices)

            minibatch_reference_boxes_mattached_gtboxes = tf.gather(reference_boxes_mattached_gtboxes,
                                                                    minibatch_indices)
            object_mask = tf.gather(object_mask, minibatch_indices)
            label = tf.gather(label, minibatch_indices)
            label_one_hot = tf.one_hot(label, self.num_classes + 1)

            return minibatch_indices, minibatch_reference_boxes_mattached_gtboxes, object_mask, label_one_hot
Developer: mbossX, Project: RRPN_FPN_Tensorflow, Lines of code: 31, Source: build_fast_rcnn.py


Example 18: _compute_model_loss

  def _compute_model_loss(
      self, input_sequence, output_sequence, sequence_length):
    """Builds a model with loss for train/eval."""
    hparams = self.hparams
    batch_size = hparams.batch_size

    self.global_step = tf.train.get_or_create_global_step()

    input_sequence = tf.to_float(input_sequence)
    output_sequence = tf.to_float(output_sequence)

    max_seq_len = tf.minimum(tf.shape(output_sequence)[1], hparams.max_seq_len)

    input_sequence = input_sequence[:, :max_seq_len]

    # The target/expected outputs.
    x_target = output_sequence[:, :max_seq_len]
    # Inputs to be fed to decoder, including zero padding for the initial input.
    x_input = tf.pad(output_sequence[:, :max_seq_len - 1],
                     [(0, 0), (1, 0), (0, 0)])
    x_length = tf.minimum(sequence_length, max_seq_len)

    # Either encode to get `z`, or do unconditional, decoder-only.
    if hparams.conditional:  # vae mode:
      q_z = self.encode(input_sequence, x_length)
      z = q_z.sample()

      # Prior distribution.
      p_z = ds.MultivariateNormalDiag(
          loc=[0.] * hparams.z_size, scale_diag=[1.] * hparams.z_size)

      # KL Divergence (nats)
      kl_div = ds.kl_divergence(q_z, p_z)

      # Concatenate the Z vectors to the inputs at each time step.
    else:  # unconditional, decoder-only generation
      kl_div = tf.zeros([batch_size, 1], dtype=tf.float32)
      z = None

    r_loss, metric_map = self.decoder.reconstruction_loss(
        x_input, x_target, x_length, z)[0:2]

    free_nats = hparams.free_bits * tf.log(2.0)
    kl_cost = tf.maximum(kl_div - free_nats, 0)

    beta = ((1.0 - tf.pow(hparams.beta_rate, tf.to_float(self.global_step)))
            * hparams.max_beta)
    self.loss = tf.reduce_mean(r_loss) + beta * tf.reduce_mean(kl_cost)

    scalars_to_summarize = {
        'loss': self.loss,
        'losses/r_loss': r_loss,
        'losses/kl_loss': kl_cost,
        'losses/kl_bits': kl_div / tf.log(2.0),
        'losses/kl_beta': beta,
    }
    return metric_map, scalars_to_summarize
Developer: wyn314, Project: magenta, Lines of code: 57, Source: base_model.py


Example 19: _get_lr_from_schedule

 def _get_lr_from_schedule(self):
     TINY  = 1e-8
     
     if self._lr_sched_params['type'] == 'stable' :
         curr_lr = self._init_lr_tfv
         
     elif self._lr_sched_params['type'] == 'poly' :
         first_it_for_sch = self._lr_sched_params['poly']['epochs_wait_before_decr']
         final_it_for_sch = self._lr_sched_params['poly']['final_ep_for_sch'] # * subepochs_per_ep
         assert first_it_for_sch < final_it_for_sch
         curr_it = tf.cast(self._num_epochs_trained_tfv, dtype='float32') # * subepochs_per_ep + curr_subepoch
         
         #curr_lr = init_lr * ( 1 - x/x2) ^ power. Power = 0.9 in parsenet, which we validated to behave ok.
         x2 = final_it_for_sch - first_it_for_sch
         x = tf.maximum( tf.constant(0, dtype="float32"), curr_it - first_it_for_sch ) # to make schedule happen within the window (first, final) epoch, stable outside.
         x = tf.minimum( x, x2 ) # in case the current iteration is after max, so that I keep schedule stable afterwards. 
         y1 = self._init_lr_tfv
         y2 = 0.9
         curr_lr = y1 * tf.pow( 1.0 - x/x2, y2 )
         
     elif self._lr_sched_params['type'] == 'expon' :
         first_it_for_sch = self._lr_sched_params['expon']['epochs_wait_before_decr']
         final_it_for_sch = self._lr_sched_params['expon']['final_ep_for_sch'] # * subepochs_per_ep
         assert first_it_for_sch < final_it_for_sch
         curr_it = tf.cast(self._num_epochs_trained_tfv, dtype='float32')
         
         # y = y1 * gamma^x. gamma = (y2 / y1)^(1/x2)
         x2 = final_it_for_sch - first_it_for_sch
         x = tf.maximum( tf.constant(0, dtype="float32"), curr_it-first_it_for_sch )
         x = tf.minimum( x, x2 )
         y1 = self._init_lr_tfv
         y2 = self._lr_sched_params['expon']['lr_to_reach_at_last_ep']
         gamma = tf.pow( (y2+TINY)/y1, 1.0/x2 )
         curr_lr = y1 * tf.pow( gamma, x )
         
     elif self._lr_sched_params['type'] == 'predef' :
         #Predefined Schedule.
         div_lr_by = self._lr_sched_params['predef']['div_lr_by']
         epochs_boundaries = [ tf.cast(e, tf.int32) for e in self._lr_sched_params['predef']['epochs'] ]
         lr_values = [ ( self._init_lr_tfv / pow(div_lr_by, i) ) for i in range( 1+len(epochs_boundaries) ) ]
         curr_lr = tf.train.piecewise_constant(self._num_epochs_trained_tfv, boundaries = epochs_boundaries, values = lr_values)
     
     elif self._lr_sched_params['type'] == 'auto' :
         self._learning_rate_tfv = tf.Variable( self._init_lr_tfv, dtype="float32", trainable=False, name="curr_lr_tfv")
         self._top_mean_val_acc_tfv = tf.Variable(0, dtype="float32", trainable=False, name="top_mean_val_acc")
         self._epoch_with_top_mean_val_acc_tvf = tf.Variable(0, dtype=self._num_epochs_trained_tfv.dtype.as_numpy_dtype, trainable=False, name="ep_top_mean_val_acc")
         self._last_epoch_lr_got_lowered_tvf = tf.Variable(0, dtype="float32", trainable=False, name="last_ep_lr_lowered")
         
         self._op_assign_new_lr = tf.assign(self._learning_rate_tfv, self._tf_plchld_float32)
         self._op_assign_top_mean_val_acc_tfv = tf.assign(self._top_mean_val_acc_tfv, self._tf_plchld_float32)
         self._op_assign_epoch_with_top_mean_val_acc_tvf = tf.assign(self._epoch_with_top_mean_val_acc_tvf, self._tf_plchld_int32)
         self._op_assign_last_epoch_lr_lowered = tf.assign(self._last_epoch_lr_got_lowered_tvf, self._tf_plchld_float32)
         
         # The LR will be changed during the routine.training, by a call to function self.run_lr_sched_updates( sessionTf )
         curr_lr = self._learning_rate_tfv
                 
     return curr_lr
Developer: Kamnitsask, Project: deepmedic, Lines of code: 57, Source: trainer.py


Example 20: leaky_twice_relu6

def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
    """:func:`leaky_twice_relu6` can be used through its shortcut: :func:`:func:`tl.act.ltrelu6`.

    This activation function is a modified version :func:`leaky_relu` introduced by the following paper:
    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__

    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    This function pushes the logic further by adding `leaky` behaviour both below zero and above six.

    The function returns the following results:
      - When x < 0: ``f(x) = alpha_low * x``.
      - When x in [0, 6]: ``f(x) = x``.
      - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.

    Parameters
    ----------
    x : Tensor
        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
    alpha_low : float
        Slope for x < 0: ``f(x) = alpha_low * x``.
    alpha_high : float
        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
    name : str
        The function name (optional).

    Examples
    --------
    >>> import tensorlayer as tl
    >>> net = tl.layers.Input([10, 200])
    >>> net = tl.layers.Dense(n_units=100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')(net)

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.

    References
    ----------
    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    """
    if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
        raise ValueError("`alpha_high` value must be in [0, 1]`")

    if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
        raise ValueError("`alpha_low` value must be in [0, 1]`")

    with tf.name_scope(name) as name_scope:
        x = tf.convert_to_tensor(x, name="features")

        x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
        x_is_below_0 = tf.minimum(alpha_low * x, 0)

        return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)
Developer: zsdonghao, Project: tensorlayer, Lines of code: 57, Source: activation.py



Note: The tensorflow.minimum examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use must follow the corresponding project's license. Please do not reproduce without permission.

