
Python tensorflow.greater Function Code Examples


This article collects typical usage examples of the tensorflow.greater function in Python. If you have been wondering what tf.greater does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the greater function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
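
Before diving into the project examples, here is a minimal standalone sketch of the core semantics (written against eager TensorFlow 2.x; the project snippets below are mostly TensorFlow 1.x code):

import tensorflow as tf

x = tf.constant([[1.0, 5.0], [3.0, 0.5]])
threshold = tf.constant(2.0)

# tf.greater compares elementwise with NumPy-style broadcasting and
# returns a bool tensor of the broadcast shape.
mask = tf.greater(x, threshold)        # [[False, True], [True, False]]

# A pattern that recurs in the examples below: cast the bool mask to
# float and multiply, zeroing out entries at or below the threshold.
gated = x * tf.cast(mask, tf.float32)  # [[0.0, 5.0], [3.0, 0.0]]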

Example 1: thresholding

def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, tf.float32)
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # set up per-example thresholds, clamped to [0.05, 0.15]
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)   #0.4
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [128, 1, 1])

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
Developer: nearlyeveryone | Project: bpm | Lines: 29 | Source: bpm_estimator.py


Example 2: set_logp_to_neg_inf

def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to allowed regions of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.select(is_inside_bounds, logp, tf.fill(tf.shape(X), config.dtype(-np.inf)))

    return logp
Developer: tensorprob | Project: tensorprob | Lines: 35 | Source: utilities.py
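
Note: tf.select was removed in TensorFlow 1.0 in favor of tf.where, which takes the same (condition, x, y) arguments. A standalone sketch of the same bounding step with the current API (the tensors and the single (0, 1) bound here are illustrative, not from the project):

import numpy as np
import tensorflow as tf

X = tf.constant([-2.0, 0.5, 3.0])
logp = tf.constant([0.1, 0.2, 0.3])

# one allowed region (0, 1); outside it the log probability becomes -inf
is_inside_bounds = tf.logical_and(tf.greater(X, 0.0), tf.less(X, 1.0))
logp = tf.where(is_inside_bounds, logp, tf.fill(tf.shape(X), -np.inf))  # [-inf, 0.2, -inf]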


Example 3: _variance

  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = tf.where(tf.greater(self.df, 2.),
                     self.df - 2.,
                     tf.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (tf.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           tf.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = tf.where(
        self.df > tf.fill(self.batch_shape_tensor(), 2.),
        var,
        tf.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return tf.where(
          tf.greater(
              self.df,
              tf.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          tf.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              tf.assert_less(
                  tf.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
Developer: asudomoeva | Project: probability | Lines: 33 | Source: student_t.py
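
For reference, this implements the Student's t variance: Var(X) = scale² · df / (df − 2) for df > 2; the variance is infinite for 1 < df ≤ 2 and undefined for df ≤ 1 (returned as NaN when allow_nan_stats is set, otherwise rejected by the assertion).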


Example 4: get_losses

                def get_losses(obj_mask):
                  """Get motion constraint loss."""
                  # Find height of segment.
                  coords = tf.where(tf.greater(  # Shape (num_true, 2=yx)
                      obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
                  y_max = tf.reduce_max(coords[:, 0])
                  y_min = tf.reduce_min(coords[:, 0])
                  seg_height = y_max - y_min
                  f_y = self.intrinsic_mat[i, 0, 1, 1]
                  approx_depth = ((f_y * self.global_scale_var) /
                                  tf.to_float(seg_height))
                  reference_pred = tf.boolean_mask(
                      depth_pred, tf.greater(
                          tf.reshape(obj_mask[:, :, 0],
                                     (self.img_height, self.img_width, 1)),
                          tf.constant(0.5, dtype=tf.float32)))

                  # Establish loss on approx_depth, a scalar, and
                  # reference_pred, our dense prediction. Normalize both to
                  # prevent degenerative depth shrinking.
                  global_mean_depth_pred = tf.reduce_mean(depth_pred)
                  reference_pred /= global_mean_depth_pred
                  approx_depth /= global_mean_depth_pred
                  spatial_err = tf.abs(reference_pred - approx_depth)
                  mean_spatial_err = tf.reduce_mean(spatial_err)
                  return mean_spatial_err
Developer: pcm17 | Project: models | Lines: 26 | Source: model.py


Example 5: IoULoss

 def IoULoss(self, pd, gt):
     mask = tf.cast(
         tf.greater(tf.reduce_sum(
             tf.cast(tf.greater(gt, 0), tf.int8), 3), 3),
         tf.float32
     )
     npd = tf.transpose(pd, [3, 0, 1, 2])
     ngt = tf.transpose(gt, [3, 0, 1, 2])
     area_x = tf.mul(
         tf.add(tf.gather(npd, 0), tf.gather(npd, 2)),
         tf.add(tf.gather(npd, 1), tf.gather(npd, 3)),
     )
     area_g = tf.mul(
         tf.add(tf.gather(ngt, 0), tf.gather(ngt, 2)),
         tf.add(tf.gather(ngt, 1), tf.gather(ngt, 3)),
     )
     w_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
         tf.minimum(tf.gather(npd, 0), tf.gather(ngt, 0)),
         tf.minimum(tf.gather(npd, 2), tf.gather(ngt, 2)),
     ))
     h_overlap = tf.maximum(tf.constant(0, tf.float32), tf.add(
         tf.minimum(tf.gather(npd, 1), tf.gather(ngt, 1)),
         tf.minimum(tf.gather(npd, 3), tf.gather(ngt, 3)),
     ))
     area_overlap = tf.mul(w_overlap, h_overlap)
     area_u = tf.sub(tf.add(area_x, area_g), area_overlap)
     iou = tf.div(area_overlap, tf.add(area_u, tf.constant(1, tf.float32)))
     iou = tf.maximum(iou, tf.constant(1e-4, tf.float32))
     cost = -tf.log(iou)
     cost = tf.mul(cost, mask)
     cost = tf.reduce_sum(cost)
     return cost
Developer: hewr1993 | Project: nn_expr | Lines: 32 | Source: run.py
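
Note: this example predates TensorFlow 1.0; tf.mul, tf.sub, and tf.div have since been renamed tf.multiply, tf.subtract, and tf.divide (and tf.log became tf.math.log in 2.x). The tf.greater calls themselves are unchanged on current versions.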


Example 6: prune_outside_window

def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
Developer: NoPointExc | Project: models | Lines: 30 | Source: box_list_ops.py


Example 7: _compute_precision_recall

def _compute_precision_recall(input_layer, labels, threshold,
                              per_example_weights):
  """Returns the numerator of both, the denominator of precision and recall."""

  # To apply per_example_weights, we need to collapse each row to a scalar, but
  # we really want the sum.
  labels.get_shape().assert_is_compatible_with(input_layer.get_shape())
  relevant = tf.to_float(tf.greater(labels, 0))
  retrieved = tf.to_float(tf.greater(input_layer, threshold))
  selected = relevant * retrieved

  if per_example_weights:
    per_example_weights = tf.convert_to_tensor(per_example_weights,
                                               name='per_example_weights')
    if selected.get_shape().dims:
      per_example_weights.get_shape().assert_is_compatible_with(
          [selected.get_shape().dims[0]])
    else:
      per_example_weights.get_shape().assert_is_compatible_with([None])
    per_example_weights = tf.to_float(tf.greater(per_example_weights, 0))
    selected = functions.reduce_batch_sum(selected) * per_example_weights
    relevant = functions.reduce_batch_sum(relevant) * per_example_weights
    retrieved = functions.reduce_batch_sum(retrieved) * per_example_weights
  sum_relevant = tf.reduce_sum(relevant)
  sum_retrieved = tf.reduce_sum(retrieved)
  selected = tf.reduce_sum(selected)
  return selected, sum_retrieved, sum_relevant
Developer: roger2000hk | Project: prettytensor | Lines: 27 | Source: pretty_tensor_loss_methods.py
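
A standalone sketch (the values are illustrative, not from the project) of how the three sums computed above combine into precision and recall:

import tensorflow as tf

labels = tf.constant([1.0, 0.0, 1.0, 1.0])
scores = tf.constant([0.9, 0.8, 0.2, 0.6])

relevant  = tf.cast(tf.greater(labels, 0.0), tf.float32)   # [1, 0, 1, 1]
retrieved = tf.cast(tf.greater(scores, 0.5), tf.float32)   # [1, 1, 0, 1]
selected  = relevant * retrieved                           # [1, 0, 0, 1]

precision = tf.reduce_sum(selected) / tf.reduce_sum(retrieved)  # 2/3
recall    = tf.reduce_sum(selected) / tf.reduce_sum(relevant)   # 2/3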


Example 8: sanitize

  def sanitize(self, x, eps_delta, sigma=None,
               option=ClipOption(None, None), tensor_name=None,
               num_examples=None, add_noise=True):
    """Sanitize the given tensor.

    This sanitizes a given tensor by first applying L2 norm clipping and then
    adding Gaussian noise. It calls the privacy accountant to update the
    privacy spending.

    Args:
      x: the tensor to sanitize.
      eps_delta: a pair of eps, delta for (eps,delta)-DP. Use it to
        compute sigma if sigma is None.
      sigma: if sigma is not None, use sigma.
      option: a ClipOption which, if supplied, is used for
        clipping and adding noise.
      tensor_name: the name of the tensor.
      num_examples: if None, use the number of "rows" of x.
      add_noise: if True, then add noise, else just clip.
    Returns:
      a pair of sanitized tensor and the operation to accumulate privacy
      spending.
    """

    if sigma is None:
      # pylint: disable=unpacking-non-sequence
      eps, delta = eps_delta
      with tf.control_dependencies(
          [tf.Assert(tf.greater(eps, 0),
                     ["eps needs to be greater than 0"]),
           tf.Assert(tf.greater(delta, 0),
                     ["delta needs to be greater than 0"])]):
        # The following formula is taken from
        #   Dwork and Roth, The Algorithmic Foundations of Differential
        #   Privacy, Appendix A.
        #   http://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf
        sigma = tf.sqrt(2.0 * tf.log(1.25 / delta)) / eps

    l2norm_bound, clip = option
    if l2norm_bound is None:
      l2norm_bound, clip = self._default_option
      if ((tensor_name is not None) and
          (tensor_name in self._options)):
        l2norm_bound, clip = self._options[tensor_name]
    if clip:
      x = utils.BatchClipByL2norm(x, l2norm_bound)

    if add_noise:
      if num_examples is None:
        num_examples = tf.slice(tf.shape(x), [0], [1])
      privacy_accum_op = self._accountant.accumulate_privacy_spending(
          eps_delta, sigma, num_examples)
      with tf.control_dependencies([privacy_accum_op]):
        saned_x = utils.AddGaussianNoise(tf.reduce_sum(x, 0),
                                         sigma * l2norm_bound)
    else:
      saned_x = tf.reduce_sum(x, 0)
    return saned_x
Developer: TrendonixNetwork | Project: ProjectCybonix | Lines: 58 | Source: sanitizer.py
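
A quick standalone check of the sigma formula quoted from Dwork and Roth (the eps and delta values are illustrative):

import math

eps, delta = 1.0, 1e-5
sigma = math.sqrt(2.0 * math.log(1.25 / delta)) / eps
print(sigma)  # ~4.84: the noise multiplier for a single (1.0, 1e-5)-DP release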


Example 9: _get_valid_sample_fraction

def _get_valid_sample_fraction(labels, p=0):
    """return fraction of non-negative examples, the ignored examples have been marked as negative"""
    num_valid = tf.reduce_sum(tf.cast(tf.greater_equal(labels, p), tf.float32))
    num_example = tf.cast(tf.size(labels), tf.float32)
    frac = tf.cond(tf.greater(num_example, 0), lambda:num_valid / num_example,  
            lambda: tf.cast(0, tf.float32))
    frac_ = tf.cond(tf.greater(num_valid, 0), lambda:num_example / num_valid, 
            lambda: tf.cast(0, tf.float32))
    return frac, frac_
Developer: imyourm8 | Project: FastMaskRCNN | Lines: 9 | Source: pyramid_network.py


Example 10: style_loss

def style_loss(CNN_structure, const_layers, var_layers, content_segs, style_segs, weight):
    loss_styles = []
    layer_count = float(len(const_layers))
    layer_index = 0

    _, content_seg_height, content_seg_width, _ = content_segs[0].get_shape().as_list()
    _, style_seg_height, style_seg_width, _ = style_segs[0].get_shape().as_list()
    for layer_name in CNN_structure:
        layer_name = layer_name[layer_name.find("/") + 1:]

        # downsampling segmentation
        if "pool" in layer_name:
            content_seg_width, content_seg_height = int(math.ceil(content_seg_width / 2)), int(math.ceil(content_seg_height / 2))
            style_seg_width, style_seg_height = int(math.ceil(style_seg_width / 2)), int(math.ceil(style_seg_height / 2))

            for i in xrange(len(content_segs)):
                content_segs[i] = tf.image.resize_bilinear(content_segs[i], tf.constant((content_seg_height, content_seg_width)))
                style_segs[i] = tf.image.resize_bilinear(style_segs[i], tf.constant((style_seg_height, style_seg_width)))

        elif "conv" in layer_name:
            for i in xrange(len(content_segs)):
                # borders differ slightly from the Torch implementation
                content_segs[i] = tf.nn.avg_pool(tf.pad(content_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"), \
                ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')
                style_segs[i] = tf.nn.avg_pool(tf.pad(style_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"), \
                ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')

        if layer_name == var_layers[layer_index].name[var_layers[layer_index].name.find("/") + 1:]:
            print("Setting up style layer: <{}>".format(layer_name))
            const_layer = const_layers[layer_index]
            var_layer = var_layers[layer_index]

            layer_index = layer_index + 1

            layer_style_loss = 0.0
            for content_seg, style_seg in zip(content_segs, style_segs):
                gram_matrix_const = gram_matrix(tf.multiply(const_layer, style_seg))
                style_mask_mean   = tf.reduce_mean(style_seg)
                gram_matrix_const = tf.cond(tf.greater(style_mask_mean, 0.),
                                        lambda: gram_matrix_const / (tf.to_float(tf.size(const_layer)) * style_mask_mean),
                                        lambda: gram_matrix_const
                                    )

                gram_matrix_var   = gram_matrix(tf.multiply(var_layer, content_seg))
                content_mask_mean = tf.reduce_mean(content_seg)
                gram_matrix_var   = tf.cond(tf.greater(content_mask_mean, 0.),
                                        lambda: gram_matrix_var / (tf.to_float(tf.size(var_layer)) * content_mask_mean),
                                        lambda: gram_matrix_var
                                    )

                diff_style_sum    = tf.reduce_mean(tf.squared_difference(gram_matrix_const, gram_matrix_var)) * content_mask_mean

                layer_style_loss += diff_style_sum

            loss_styles.append(layer_style_loss * weight)
    return loss_styles
Developer: 4ever911 | Project: deep-photo-styletransfer-tf | Lines: 56 | Source: photo_style.py


Example 11: get_position_cross_clf_label

def get_position_cross_clf_label(positions, seq_length, ans_avg_len):
  start_labels = tf.reshape(positions[:, 0], [-1])  # positions shape: [batch_size, 2] => [batch_size]
  end_labels = tf.reshape(positions[:, 1], [-1])
  ans_len = end_labels - start_labels + 1  # [batch_size]; lengths above ans_avg_len are clamped to ans_avg_len

  mask = tf.cast(tf.greater(ans_len, ans_avg_len), tf.int32) * (
            tf.zeros_like(ans_len, dtype=tf.int32) + ans_avg_len)

  ans_len = ans_len * (1 - tf.cast(tf.greater(ans_len, ans_avg_len), tf.int32)) + mask
  return start_labels * ans_avg_len + ans_len - 1
Developer: Accagain2014 | Project: ML | Lines: 10 | Source: tensorflow_examples.py
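
The mask arithmetic above is an unrolled clamp: answer lengths above ans_avg_len are replaced by ans_avg_len, everything else passes through. A standalone equivalent:

import tensorflow as tf

ans_len = tf.constant([3, 9, 5])
ans_avg_len = 6
clamped = tf.minimum(ans_len, ans_avg_len)  # [3, 6, 5], same result as the mask trick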


Example 12: _match_when_rows_are_non_empty

    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
            similarity_matrix)
        force_match_column_ids = tf.argmax(similarity_matrix, 1,
                                           output_type=tf.int32)
        force_match_column_indicators = (
            tf.one_hot(
                force_match_column_ids, depth=similarity_matrix_shape[1]) *
            tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32))
        force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
                                        output_type=tf.int32)
        force_match_column_mask = tf.cast(
            tf.reduce_max(force_match_column_indicators, 0), tf.bool)
        final_matches = tf.where(force_match_column_mask,
                                 force_match_row_ids, matches)
        return final_matches
      else:
        return matches
Developer: pcm17 | Project: models | Lines: 52 | Source: argmax_matcher.py
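
A standalone illustration of the two-threshold bucketing above (values illustrative): in the default branch, columns below the unmatched threshold become negatives (-1), columns between the thresholds are ignored (-2), and only columns at or above the matched threshold keep their argmax match:

import tensorflow as tf

matched_vals = tf.constant([0.2, 0.45, 0.8])
unmatched_threshold, matched_threshold = 0.3, 0.5

below_unmatched = tf.greater(unmatched_threshold, matched_vals)  # [True, False, False]
between = tf.logical_and(
    tf.greater_equal(matched_vals, unmatched_threshold),
    tf.greater(matched_threshold, matched_vals))                 # [False, True, False]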


Example 13: _compute_alpha

def _compute_alpha(x):
    """
    Computing the scale parameter.
    """
    threshold = _compute_threshold(x)
    alpha1_temp1 = tf.where(tf.greater(x, threshold), x, tf.zeros_like(x, tf.float32))
    alpha1_temp2 = tf.where(tf.less(x, -threshold), x, tf.zeros_like(x, tf.float32))
    alpha_array = tf.add(alpha1_temp1, alpha1_temp2, name=None)
    alpha_array_abs = tf.abs(alpha_array)
    alpha_array_abs1 = tf.where(tf.greater(alpha_array_abs, 0), tf.ones_like(alpha_array_abs, tf.float32), tf.zeros_like(alpha_array_abs, tf.float32))
    alpha_sum = tf.reduce_sum(alpha_array_abs)
    n = tf.reduce_sum(alpha_array_abs1)
    alpha = tf.div(alpha_sum, n)
    return alpha
Developer: dccforever | Project: tensorlayer | Lines: 14 | Source: binary.py
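
_compute_alpha reduces to the mean |x| over the entries whose magnitude strictly exceeds the threshold (the scale factor of a ternary-weight layer). A compact standalone equivalent:

import tensorflow as tf

x = tf.constant([-1.2, 0.1, 0.8, -0.05])
threshold = 0.5

mask = tf.greater(tf.abs(x), threshold)                   # [True, False, True, False]
alpha = tf.reduce_mean(tf.boolean_mask(tf.abs(x), mask))  # (1.2 + 0.8) / 2 = 1.0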


Example 14: _match_when_rows_are_non_empty

    def _match_when_rows_are_non_empty():
      """Performs matching when the rows of similarity matrix are non empty.

      Returns:
        matches:  int32 tensor indicating the row each column matches to.
      """
      # Matches for each column
      matches = tf.argmax(similarity_matrix, 0)

      # Deal with matched and unmatched threshold
      if self._matched_threshold is not None:
        # Get logical indices of ignored and unmatched columns as tf.int64
        matched_vals = tf.reduce_max(similarity_matrix, 0)
        below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                               matched_vals)
        between_thresholds = tf.logical_and(
            tf.greater_equal(matched_vals, self._unmatched_threshold),
            tf.greater(self._matched_threshold, matched_vals))

        if self._negatives_lower_than_unmatched:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -1)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -2)
        else:
          matches = self._set_values_using_indicator(matches,
                                                     below_unmatched_threshold,
                                                     -2)
          matches = self._set_values_using_indicator(matches,
                                                     between_thresholds,
                                                     -1)

      if self._force_match_for_each_row:
        forced_matches_ids = tf.cast(tf.argmax(similarity_matrix, 1), tf.int32)

        # Set matches[forced_matches_ids] = [0, ..., R], R is number of rows.
        row_range = tf.range(tf.shape(similarity_matrix)[0])
        col_range = tf.range(tf.shape(similarity_matrix)[1])
        forced_matches_values = tf.cast(row_range, matches.dtype)
        keep_matches_ids, _ = tf.setdiff1d(col_range, forced_matches_ids)
        keep_matches_values = tf.gather(matches, keep_matches_ids)
        matches = tf.dynamic_stitch(
            [forced_matches_ids,
             keep_matches_ids], [forced_matches_values, keep_matches_values])

      return tf.cast(matches, tf.int32)
Developer: DaRealLazyPanda | Project: models | Lines: 48 | Source: argmax_matcher.py


Example 15: do_report

    def do_report():
        r = sess.run([best,
                      correct,
                      tf.greater(y[:, 0], 0),
                      y_[:, 0],
                      digits_loss,
                      presence_loss,
                      cross_entropy],
                     feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(
                        numpy.logical_or(
                            numpy.all(r[0] == r[1], axis=1),
                            numpy.logical_and(r[2] < 0.5,
                                              r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print "{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb))
        num_p_correct = numpy.sum(r[2] == r[3])

        print ("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
               "(digits: {}, presence: {}) |{}|").format(
            batch_idx,
            100. * num_correct / (len(r[0])),
            100. * num_p_correct / len(r[2]),
            r[6],
            r[4],
            r[5],
            "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                                           for b, c, pb, pc in zip(*r_short)))
Developer: GodBlessZhk | Project: deep-anpr | Lines: 30 | Source: train.py


Example 16: accumulate_privacy_spending

  def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume Gaussian noise
    is used on a randomly sampled batch, which gives better composition: 1. the
    per-batch privacy is computed using the privacy amplification via sampling
    bound; 2. the composition is done using the composition theorem for
    Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)])
Developer: ZhangShiyue | Project: models | Lines: 34 | Source: accountant.py
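
A standalone check of the amplification formula from Lemma 2.2 (the per-batch epsilon and sampling ratio are illustrative):

import math

eps, sampling_ratio = 1.0, 0.01
amortize_eps = math.log(1.0 + sampling_ratio * (math.exp(eps) - 1.0))
print(amortize_eps)  # ~0.017: subsampling shrinks the per-step epsilon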


Example 17: unwrap

def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis along which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  idx = tf.less(tf.abs(dd), discont)
  ddmod = tf.where(idx, tf.zeros_like(ddmod), dd)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)

  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped
Developer: cghawthorne | Project: magenta | Lines: 25 | Source: spectral_ops.py
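
This mirrors numpy's np.unwrap; a quick standalone reference for the expected behaviour:

import numpy as np

p = np.array([0.0, 3.0, -3.0])
print(np.unwrap(p))  # [0.0, 3.0, 3.2832]: the -6 jump exceeds pi and is corrected by +2*pi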


Example 18: loss

    def loss(self, logits, labels, regularization):
        """Adds to the inference model the layers required to generate loss."""
        with tf.name_scope('loss'):
            with tf.name_scope('var_loss'):
                labels = tf.cast(labels, tf.float32)
                shape = labels.get_shape()

                same_class = tf.boolean_mask(logits, tf.equal(labels, tf.ones(shape)))
                diff_class = tf.boolean_mask(logits, tf.not_equal(labels, tf.ones(shape)))
                same_mean, same_var = tf.nn.moments(same_class, [0])
                diff_mean, diff_var = tf.nn.moments(diff_class, [0])
                var_loss = same_var + diff_var

            with tf.name_scope('mean_loss'):
                mean_loss = self.lamda * tf.where(tf.greater(self.mu - (same_mean - diff_mean), 0),
                                                  self.mu - (same_mean - diff_mean), 0)

            with tf.name_scope('regularization'):
                regularization *= tf.add_n(self.regularizers)

            loss = var_loss + mean_loss + regularization

            # Summaries for TensorBoard.
            tf.summary.scalar('loss/total', loss)
            with tf.name_scope('averages'):
                averages = tf.train.ExponentialMovingAverage(0.9)
                op_averages = averages.apply([var_loss, mean_loss, regularization, loss])
                tf.summary.scalar('loss/avg/var_loss', averages.average(var_loss))
                tf.summary.scalar('loss/avg/mean_loss', averages.average(mean_loss))
                tf.summary.scalar('loss/avg/regularization', averages.average(regularization))
                tf.summary.scalar('loss/avg/total', averages.average(loss))
                with tf.control_dependencies([op_averages]):
                    loss_average = tf.identity(averages.average(loss), name='control')
            return loss, loss_average
Developer: parisots | Project: gcn_metric_learning | Lines: 34 | Source: models_siamese.py


Example 19: _get_num_pos_neg_samples

  def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
    """Counts the number of positives and negatives numbers to be sampled.

    Args:
      sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
        the signed indices of the examples where the sign is based on the label
        value. The examples that cannot be sampled are set to 0. It samples
        at most sample_size*positive_fraction positive examples and fills the
        remainder with negative examples.
      sample_size: Size of subsamples.

    Returns:
      A tuple containing the number of positive and negative labels in the
      subsample.
    """
    input_length = tf.shape(sorted_indices_tensor)[0]
    valid_positive_index = tf.greater(sorted_indices_tensor,
                                      tf.zeros(input_length, tf.int32))
    num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32))
    max_num_positive_samples = tf.constant(
        int(sample_size * self._positive_fraction), tf.int32)
    num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
    num_negative_samples = tf.constant(sample_size,
                                       tf.int32) - num_positive_samples

    return num_positive_samples, num_negative_samples
Developer: ALISCIFP | Project: models | Lines: 26 | Source: balanced_positive_negative_sampler.py


Example 20: __imagenet_data_process_function

 def __imagenet_data_process_function(self, x, y):
     with tf.name_scope("imagenet_data_aug") as scope:
         #random scale
         #apparently, this works better than what we have:
         #https://github.com/facebook/fb.resnet.torch
         #but let's use the 'original' formulation for now
         #randomly sample a size in specified range
         random_size = tf.squeeze(tf.random_uniform((1, 1), 256, 480, dtype=tf.int32, name="random_scale_size"))
         #rescale smaller size with this factor
         tf.cond(tf.greater(tf.shape(x)[0], tf.shape(x)[1]), 
             lambda: tf.image.resize_images(x, [tf.shape(x)[0] * (tf.shape(x)[1] / random_size), random_size]),
             lambda: tf.image.resize_images(x, [random_size, tf.shape(x)[1] * (tf.shape(x)[0] / random_size)]))
         x = tf.image.resize_images(x, [224, 224])
         #random flip
         x = tf.image.flip_left_right(x)
         #random crop
         x = tf.random_crop(x, [224, 224, 3])
         #colour augmentation
         #this is a little more involved than I first thought
         #lets pick the inception colour distortion
         #https://github.com/tensorflow/models/blob/master/inception/inception/image_processing.py
         x = tf.image.random_brightness(x, max_delta=32. / 255.)
         x = tf.image.random_saturation(x, lower=0.5, upper=1.5)
         x = tf.image.random_hue(x, max_delta=0.2)
         x = tf.image.random_contrast(x, lower=0.5, upper=1.5)
         x = tf.clip_by_value(x, 0.0, 1.0)
         #normalisation
         x = tf.image.per_image_standardization(x)
     return [x, y]
Developer: deworrall92 | Project: groupConvolutions | Lines: 29 | Source: settings.py
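
Note: as written, the result of the tf.cond is discarded rather than assigned back to x, so the random-scale branch has no effect before the fixed 224x224 resize; presumably it was meant to read x = tf.cond(...). Similarly, tf.image.flip_left_right always flips, while the "random flip" comment suggests tf.image.random_flip_left_right was intended.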



Note: The tensorflow.greater examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, refer to the License of the corresponding project; do not repost without permission.

