
Python math_ops.greater_equal Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.greater_equal. If you have been wondering what exactly greater_equal does, how to call it, or what working examples look like, the hand-picked code samples below should help.



The sections below show 20 code examples of the greater_equal function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
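Before the examples, here is a minimal sketch of what the function computes, using the public tf.math.greater_equal alias (assumed here to wrap the same underlying op): it compares two tensors element-wise, with broadcasting, and returns a boolean tensor.

import tensorflow as tf

x = tf.constant([1, 3, 5])
y = tf.constant([2, 3, 4])

# Element-wise x >= y with the usual broadcasting rules.
print(tf.math.greater_equal(x, y))  # [False  True  True]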

Example 1: _lower_triangular_mask

def _lower_triangular_mask(shape):
  """Creates a lower-triangular boolean mask over the last 2 dimensions."""
  row_index = math_ops.cumsum(
      array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)
  col_index = math_ops.cumsum(
      array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)
  return math_ops.greater_equal(row_index, col_index)
Developer: aritratony, Project: tensorflow, Lines: 7, Source: dense_attention.py
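To see why the two cumsum calls produce a lower-triangular mask, here is a hedged 3x3 walk-through using public-API equivalents of the internal ops: cumsum along axis -2 numbers the rows, cumsum along axis -1 numbers the columns, and row >= col holds exactly on and below the diagonal.

import tensorflow as tf

shape = (3, 3)
row_index = tf.cumsum(tf.ones(shape, tf.int32), axis=-2)  # [[1,1,1],[2,2,2],[3,3,3]]
col_index = tf.cumsum(tf.ones(shape, tf.int32), axis=-1)  # [[1,2,3],[1,2,3],[1,2,3]]

print(tf.greater_equal(row_index, col_index))
# [[ True False False]
#  [ True  True False]
#  [ True  True  True]]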


Example 2: _benchmarkMapAndFilter

  def _benchmarkMapAndFilter(self, chain_length, optimize_dataset):
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
      for _ in range(chain_length):
        dataset = dataset.map(lambda x: x + 5).filter(
            lambda x: math_ops.greater_equal(x - 5, 0))
      if optimize_dataset:
        dataset = dataset.apply(
            optimization.optimize(["map_and_filter_fusion"]))

      iterator = dataset.make_one_shot_iterator()
      next_element = iterator.get_next()

      with session.Session() as sess:
        for _ in range(10):
          sess.run(next_element.op)
        deltas = []
        for _ in range(100):
          start = time.time()
          for _ in range(100):
            sess.run(next_element.op)
          end = time.time()
          deltas.append(end - start)

        median_wall_time = np.median(deltas) / 100
        opt_mark = "opt" if optimize_dataset else "no-opt"
        print("Map and filter dataset {} chain length: {} Median wall time: {}".
              format(opt_mark, chain_length, median_wall_time))
        self.report_benchmark(
            iters=1000,
            wall_time=median_wall_time,
            name="benchmark_map_and_filter_dataset_chain_latency_{}_{}".format(
                opt_mark, chain_length))
Developer: JonathanRaiman, Project: tensorflow, Lines: 33, Source: map_benchmark.py


Example 3: _filter_input

def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
                  vocab_subsampling, corpus_size, seed):
  """Filters input tensor based on vocab freq, threshold, and subsampling."""
  if vocab_freq_table is None:
    return input_tensor

  if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
    raise ValueError(
        "vocab_freq_table must be a subclass of "
        "InitializableLookupTableBase (such as HashTable) instead of type "
        "{}.".format(type(vocab_freq_table)))

  with ops.name_scope(
      "filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]):
    freq = vocab_freq_table.lookup(input_tensor)
    # Filters out elements in input_tensor that are not found in
    # vocab_freq_table (table returns a default value of -1 specified above when
    # an element is not found).
    mask = math_ops.not_equal(freq, vocab_freq_table.default_value)

    # Filters out elements whose vocab frequencies are less than the threshold.
    if vocab_min_count is not None:
      cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
      mask = math_ops.logical_and(mask,
                                  math_ops.greater_equal(freq, cast_threshold))

    input_tensor = array_ops.boolean_mask(input_tensor, mask)
    freq = array_ops.boolean_mask(freq, mask)

  if not vocab_subsampling:
    return input_tensor

  if vocab_subsampling < 0 or vocab_subsampling > 1:
    raise ValueError(
        "Invalid vocab_subsampling={} - it should be within range [0, 1].".
        format(vocab_subsampling))

  # Subsamples the input tokens based on vocabulary frequency and the
  # vocab_subsampling threshold (i.e. randomly discards commonly appearing
  # tokens).
  with ops.name_scope(
      "subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
    corpus_size = math_ops.cast(corpus_size, dtypes.float64)
    freq = math_ops.cast(freq, dtypes.float64)
    vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)

    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
    keep_prob = ((math_ops.sqrt(freq /
                                (vocab_subsampling * corpus_size)) + 1.0) *
                 (vocab_subsampling * corpus_size / freq))
    random_prob = random_ops.random_uniform(
        array_ops.shape(freq),
        minval=0,
        maxval=1,
        dtype=dtypes.float64,
        seed=seed)

    mask = math_ops.less_equal(random_prob, keep_prob)
    return array_ops.boolean_mask(input_tensor, mask)
Developer: 1000sprites, Project: tensorflow, Lines: 60, Source: skip_gram_ops.py
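The keep probability implements the word2vec subsampling heuristic referenced above (Eq. 5 of arXiv:1310.4546). A plain-Python version with illustrative numbers (the frequencies and threshold below are made up) shows how common tokens are aggressively discarded while rare ones are always kept:

import math

def keep_prob(freq, vocab_subsampling, corpus_size):
    # Same formula as in the snippet above.
    t = vocab_subsampling * corpus_size
    return (math.sqrt(freq / t) + 1.0) * (t / freq)

# Hypothetical corpus of 1M tokens with a 1e-3 subsampling threshold.
print(keep_prob(50_000, 1e-3, 1_000_000))  # ~0.16: very frequent, mostly dropped
print(keep_prob(1_000, 1e-3, 1_000_000))   # ~2.0: >= 1, so always kept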


Example 4: _prune_invalid_ids

def _prune_invalid_ids(sparse_ids, sparse_weights):
    """Prune invalid IDs (< 0) from the input ids and weights."""
    is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
    if sparse_weights is not None:
        is_id_valid = math_ops.logical_and(is_id_valid, math_ops.greater(sparse_weights.values, 0))
    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
    if sparse_weights is not None:
        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
    return sparse_ids, sparse_weights
Developer: yuikns, Project: tensorflow, Lines: 9, Source: embedding_ops.py
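A hedged toy run of the same pruning using the public tf.sparse.retain (assumed equivalent to the internal sparse_ops.sparse_retain call):

import tensorflow as tf

sparse_ids = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0]], values=[3, -1, 7], dense_shape=[2, 2])

is_id_valid = tf.greater_equal(sparse_ids.values, 0)  # [True, False, True]
pruned = tf.sparse.retain(sparse_ids, is_id_valid)
print(pruned.values)  # [3 7] -- the invalid id -1 is gone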


Example 5: _accuracy_at_threshold

def _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
  with ops.name_scope(
      name, 'accuracy_at_%s' % threshold,
      (predictions, labels, weights, threshold)) as scope:
    threshold_predictions = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metrics_lib.accuracy(
        labels=labels, predictions=threshold_predictions, weights=weights,
        name=scope)
Developer: vaccine, Project: tensorflow, Lines: 9, Source: head.py
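The heart of the metric is binarizing scores at the threshold. A minimal sketch with made-up predictions (using tf.cast in place of the deprecated math_ops.to_float):

import tensorflow as tf

predictions = tf.constant([0.2, 0.7, 0.5])
threshold = 0.5

# Scores at or above the threshold become 1.0, the rest 0.0.
print(tf.cast(tf.greater_equal(predictions, threshold), tf.float32))  # [0. 1. 1.]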


Example 6: cosine_decay_fn

 def cosine_decay_fn(global_step):
   if global_step is None:
     raise ValueError("global_step is required for cosine_decay.")
   global_step = math_ops.minimum(global_step, decay_steps)
   completed_fraction = math_ops.to_float(global_step) / math_ops.to_float(
       decay_steps)
   fraction = 2.0 * num_periods * completed_fraction
   decayed = 0.5 * (
       1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
   if zero_after is not None:
     decayed = array_ops.where(
         math_ops.greater_equal(fraction, 2 * zero_after), 0.0, decayed)
   return decayed
Developer: AbhinavJain13, Project: tensorflow, Lines: 13, Source: sign_decay.py
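A plain-Python trace of the schedule, with assumed parameter values: the cosine decays from 1 to 0 as training progresses, and zero_after clamps everything past the given number of periods to zero.

import math

def cosine_decay(step, decay_steps=100, num_periods=0.5, zero_after=None):
    step = min(step, decay_steps)
    fraction = 2.0 * num_periods * (step / decay_steps)
    decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
    if zero_after is not None and fraction >= 2 * zero_after:
        decayed = 0.0
    return decayed

print(cosine_decay(0))    # 1.0
print(cosine_decay(50))   # ~0.5
print(cosine_decay(100))  # ~0.0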


Example 7: assert_rank_at_least

def assert_rank_at_least(x, rank, data=None, summarize=None, name=None):
    """Assert `x` has rank equal to `rank` or higher.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Example of adding dependency to the tensor being checked:

  ```python
  x = tf.with_dependencies([tf.assert_rank_at_least(x, 2)], x)
  ```

  Args:
    x:  Numeric `Tensor`.
    rank:  Scalar `Tensor`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.

  Raises:
    ValueError:  If static checks determine `x` has wrong rank.
  """
    with ops.op_scope([x], name, "assert_rank_at_least"):
        x = ops.convert_to_tensor(x, name="x")
        rank = ops.convert_to_tensor(rank, name="rank")

        # Attempt to statically determine the rank.
        x_rank_static = x.get_shape().ndims
        rank_static = tensor_util.constant_value(rank)
        if x_rank_static is not None and rank_static is not None:
            if x_rank_static < rank_static:
                raise ValueError(
                    "Tensor %s must have rank %d.  Received rank %d, shape %s"
                    % (x.name, rank_static, x_rank_static, x.get_shape())
                )
            return control_flow_ops.no_op(name="static_checks_determined_all_ok")

        if data is None:
            data = ["Tensor %s must have rank at least" % x.name, rank, "Received shape: ", array_ops.shape(x)]
        condition = math_ops.greater_equal(array_ops.rank(x), rank)
        return logging_ops.Assert(condition, data, summarize=summarize)
Developer: RChandrasekar, Project: tensorflow, Lines: 50, Source: check_ops.py


Example 8: maybe_update_masks

 def maybe_update_masks():
   with ops.name_scope(self._spec.name):
     is_step_within_pruning_range = math_ops.logical_and(
         math_ops.greater_equal(self._global_step,
                                self._spec.begin_pruning_step),
         # If end_pruning_step is negative, keep pruning forever!
         math_ops.logical_or(
             math_ops.less_equal(self._global_step,
                                 self._spec.end_pruning_step),
             math_ops.less(self._spec.end_pruning_step, 0)))
     is_pruning_step = math_ops.less_equal(
         math_ops.add(self._last_update_step, self._spec.pruning_frequency),
         self._global_step)
     return math_ops.logical_and(is_step_within_pruning_range,
                                 is_pruning_step)
Developer: SylChan, Project: tensorflow, Lines: 15, Source: pruning.py
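The predicate is three comparisons glued together with logical ops. An eager plain-Python restatement (a hypothetical helper, not part of the pruning library) makes the schedule easier to read:

def should_update_mask(step, begin_step, end_step, last_update_step, frequency):
    # Within the pruning window; a negative end_step means "prune forever".
    in_range = step >= begin_step and (step <= end_step or end_step < 0)
    # Enough steps have elapsed since the last mask update.
    is_pruning_step = last_update_step + frequency <= step
    return in_range and is_pruning_step

print(should_update_mask(step=120, begin_step=100, end_step=-1,
                         last_update_step=100, frequency=10))  # True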


Example 9: restart_decay_fn

 def restart_decay_fn(global_step):
   if global_step is None:
     raise ValueError("global_step is required for cosine_decay.")
   global_step = math_ops.minimum(global_step, decay_steps)
   num = math_ops.mod(num_periods * math_ops.to_float(global_step),
                      decay_steps)
   fraction = num / math_ops.to_float(decay_steps)
   decayed = 0.5 * (
       1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
   if zero_after is not None:
     tmp = math_ops.to_float(
         num_periods * global_step) / math_ops.to_float(decay_steps)
     decayed = array_ops.where(
         math_ops.greater_equal(tmp, zero_after), 0.0, decayed)
   return decayed
Developer: AbhinavJain13, Project: tensorflow, Lines: 15, Source: sign_decay.py


Example 10: _make_logistic_eval_metric_ops

def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
  """Returns a dictionary of evaluation metric ops for logistic regression.

  Args:
    labels: The labels `Tensor`, or a dict with only one `Tensor` keyed by name.
    predictions: The predictions `Tensor`.
    thresholds: List of floating point thresholds to use for accuracy,
      precision, and recall metrics.

  Returns:
    A dict of metric results keyed by name.
  """
  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor = list(labels.values())[0]

  metrics = {}
  metrics[metric_key.MetricKey.PREDICTION_MEAN] = metrics_lib.streaming_mean(
      predictions)
  metrics[metric_key.MetricKey.LABEL_MEAN] = metrics_lib.streaming_mean(
      labels_tensor)
  # Also include the streaming mean of the label as an accuracy baseline, as
  # a reminder to users.
  metrics[metric_key.MetricKey.ACCURACY_BASELINE] = metrics_lib.streaming_mean(
      labels_tensor)

  metrics[metric_key.MetricKey.AUC] = metrics_lib.streaming_auc(
      labels=labels_tensor, predictions=predictions)

  for threshold in thresholds:
    predictions_at_threshold = math_ops.cast(
        math_ops.greater_equal(predictions, threshold),
        dtypes.float32,
        name='predictions_at_threshold_%f' % threshold)
    metrics[metric_key.MetricKey.ACCURACY_MEAN % threshold] = (
        metrics_lib.streaming_accuracy(labels=labels_tensor,
                                       predictions=predictions_at_threshold))
    # Precision for positive examples.
    metrics[metric_key.MetricKey.PRECISION_MEAN % threshold] = (
        metrics_lib.streaming_precision(labels=labels_tensor,
                                        predictions=predictions_at_threshold))
    # Recall for positive examples.
    metrics[metric_key.MetricKey.RECALL_MEAN % threshold] = (
        metrics_lib.streaming_recall(labels=labels_tensor,
                                     predictions=predictions_at_threshold))

  return metrics
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 48, Source: logistic_regressor.py


Example 11: grow_tree

  def grow_tree(self, stats_summaries_list, feature_ids_list,
                last_layer_nodes_range):
    # For the not-in-memory situation, we need to accumulate enough batches
    # first before proceeding with building a tree layer.
    max_splits = _get_max_splits(self._tree_hparams)

    # Prepare accumulators.
    accumulators = []
    dependencies = []
    for i, feature_ids in enumerate(feature_ids_list):
      stats_summaries = stats_summaries_list[i]
      accumulator = data_flow_ops.ConditionalAccumulator(
          dtype=dtypes.float32,
          # The stats consist of grads and hessians (the last dimension).
          shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
          shared_name='numeric_stats_summary_accumulator_' + str(i))
      accumulators.append(accumulator)

      apply_grad = accumulator.apply_grad(
          array_ops.stack(stats_summaries, axis=0), self._stamp_token)
      dependencies.append(apply_grad)

    # Grow the tree if enough batches are accumulated.
    with ops.control_dependencies(dependencies):
      if not self._is_chief:
        return control_flow_ops.no_op()

      min_accumulated = math_ops.reduce_min(
          array_ops.stack([acc.num_accumulated() for acc in accumulators]))

      def grow_tree_from_accumulated_summaries_fn():
        """Updates tree with the best layer from accumulated summaries."""
        # Take out the accumulated summaries from the accumulator and grow.
        stats_summaries_list = []
        stats_summaries_list = [
            array_ops.unstack(accumulator.take_grad(1), axis=0)
            for accumulator in accumulators
        ]
        grow_op = self._grow_tree_from_stats_summaries(
            stats_summaries_list, feature_ids_list, last_layer_nodes_range)
        return grow_op

      grow_model = control_flow_ops.cond(
          math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
          grow_tree_from_accumulated_summaries_fn,
          control_flow_ops.no_op,
          name='wait_until_n_batches_accumulated')
      return grow_model
Developer: ZhangXinNan, Project: tensorflow, Lines: 48, Source: boosted_trees.py


Example 12: _update_mask

  def _update_mask(self, weights, threshold):
    """Updates the mask for a given weight tensor.

    This functions first computes the cdf of the weight tensor, and estimates
    the threshold value such that 'desired_sparsity' fraction of weights
    have magnitude less than the threshold.

    Args:
      weights: The weight tensor that needs to be masked.
      threshold: The current threshold value. The function will compute a new
        threshold and return the exponential moving average using the current
        value of threshold

    Returns:
      new_threshold: The new value of the threshold based on weights, and
        sparsity at the current global_step
      new_mask: A numpy array of the same size and shape as weights containing
        0 or 1 to indicate which of the values in weights falls below
        the threshold

    Raises:
      ValueError: if sparsity is not defined
    """
    if self._sparsity is None:
      raise ValueError('Sparsity variable undefined')

    sparsity = self._get_sparsity(weights.op.name)
    with ops.name_scope(weights.op.name + '_pruning_ops'):
      abs_weights = math_ops.abs(weights)
      k = math_ops.cast(
          math_ops.round(
              math_ops.cast(array_ops.size(abs_weights), dtypes.float32) *
              (1 - sparsity)), dtypes.int32)
      # Sort the entire array
      values, _ = nn_ops.top_k(
          array_ops.reshape(abs_weights, [-1]), k=array_ops.size(abs_weights))
      # Grab the (k-1) th value
      current_threshold = array_ops.gather(values, k - 1)
      smoothed_threshold = math_ops.add_n([
          math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
          math_ops.multiply(threshold, self._spec.threshold_decay)
      ])

      new_mask = math_ops.cast(
          math_ops.greater_equal(abs_weights, smoothed_threshold),
          dtypes.float32)

    return smoothed_threshold, new_mask
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 48, Source: pruning.py
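A toy run of the threshold computation with made-up weights, using public-API equivalents of the internal ops: for a 50% sparsity target over four weights, k = 2, so the threshold is the second-largest magnitude and the two smallest weights are masked out.

import tensorflow as tf

weights = tf.constant([0.1, -0.5, 0.3, -0.05])
sparsity = 0.5                                 # zero out half of the weights
abs_w = tf.abs(weights)
size = tf.size(abs_w)                          # 4
k = tf.cast(tf.round(tf.cast(size, tf.float32) * (1 - sparsity)), tf.int32)

values, _ = tf.math.top_k(tf.reshape(abs_w, [-1]), k=size)  # sorted magnitudes
threshold = tf.gather(values, k - 1)           # 2nd largest = 0.3

print(tf.cast(tf.greater_equal(abs_w, threshold), tf.float32))  # [0. 1. 1. 0.]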


Example 13: assert_greater_equal

def assert_greater_equal(x, y, data=None, summarize=None, message=None,
                         name=None):
  """Assert the condition `x >= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] >= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to
      "assert_greater_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x >= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name

    if data is None:
      data = [
          message,
          'Condition x >= y did not hold element-wise:'
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
Developer: Jackiefan, Project: tensorflow, Lines: 47, Source: check_ops.py


Example 14: grow_not_in_mem

        def grow_not_in_mem():
          """Accumulates the data and grows a layer when ready."""

          accumulators = []
          dependencies = []
          for i, feature_ids in enumerate(feature_ids_list):
            stats_summaries = stats_summaries_list[i]
            accumulator = data_flow_ops.ConditionalAccumulator(
                dtype=dtypes.float32,
                # The stats consist of grads and hessians (the last dimension).
                shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
                shared_name='numeric_stats_summary_accumulator_' + str(i))
            accumulators.append(accumulator)

            apply_grad = accumulator.apply_grad(
                array_ops.stack(stats_summaries, axis=0), stamp_token)
            dependencies.append(apply_grad)

          def grow_tree_from_accumulated_summaries_fn():
            """Updates tree with the best layer from accumulated summaries."""
            # Take out the accumulated summaries from the accumulator and grow.
            stats_summaries_list = []

            stats_summaries_list = [
                array_ops.unstack(accumulator.take_grad(1), axis=0)
                for accumulator in accumulators
            ]

            grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
                                                     feature_ids_list)
            return grow_op

          with ops.control_dependencies(dependencies):
            if config.is_chief:
              min_accumulated = math_ops.reduce_min(
                  array_ops.stack(
                      [acc.num_accumulated() for acc in accumulators]))

              grow_model = control_flow_ops.cond(
                  math_ops.greater_equal(min_accumulated, n_batches_per_layer),
                  grow_tree_from_accumulated_summaries_fn,
                  control_flow_ops.no_op,
                  name='wait_until_n_batches_accumulated')

              return grow_model
            else:
              return control_flow_ops.no_op()
Developer: Eagle732, Project: tensorflow, Lines: 47, Source: boosted_trees.py


Example 15: dropped_inputs

      def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):  # pylint: disable=missing-docstring
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        alpha_p = -alpha * scale

        kept_idx = math_ops.greater_equal(
            K.random_uniform(noise_shape, seed=seed), rate)
        kept_idx = math_ops.cast(kept_idx, K.floatx())

        # Get affine transformation params
        a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
        b = -a * alpha_p * rate

        # Apply mask
        x = inputs * kept_idx + alpha_p * (1 - kept_idx)

        # Do affine transformation
        return a * x + b
Developer: Jackiefan, Project: tensorflow, Lines: 18, Source: noise.py
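The constants come from the SELU paper; a and b are chosen so the activations keep zero mean and unit variance after alpha dropout. A quick numeric check with an illustrative rate of 0.1:

alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale   # value that dropped units are set to (~ -1.758)

rate = 0.1                 # illustrative dropout rate
a = ((1 - rate) * (1 + rate * alpha_p ** 2)) ** -0.5
b = -a * alpha_p * rate
print(a, b)                # ~0.921, ~0.162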


Example 16: assert_rank_at_least

def assert_rank_at_least(x, rank, data=None, summarize=None, name=None):
  """Assert `x` has rank equal to `rank` or higher.

  Args:
    x:  Numeric `Tensor`.
    rank:  Scalar `Tensor`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.

  Raises:
    ValueError:  If static checks determine `x` has wrong rank.
  """
  with ops.op_scope([x], name, 'assert_rank_at_least'):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')

    # Attempt to statically determine the rank.
    x_rank_static = x.get_shape().ndims
    rank_static = tensor_util.constant_value(rank)
    if x_rank_static is not None and rank_static is not None:
      if x_rank_static < rank_static:
        raise ValueError(
            'Tensor %s must have rank at least %d.  Received rank %d, shape %s'
            % (x.name, rank_static, x_rank_static, x.get_shape()))
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')

    if data is None:
      data = [
          'Tensor %s must have rank at least' % x.name,
          rank,
          'Received shape: ',
          array_ops.shape(x)]
    condition = math_ops.greater_equal(array_ops.rank(x), rank)
    return logging_ops.Assert(condition, data, summarize=summarize)
Developer: AlKavaev, Project: tensorflow, Lines: 40, Source: tensor_util.py


Example 17: center_bias

  def center_bias(self, center_bias_var, gradients, hessians):
    # For the not-in-memory situation, we need to accumulate enough batches
    # first before proceeding with centering bias.

    # Create an accumulator.
    bias_dependencies = []
    bias_accumulator = data_flow_ops.ConditionalAccumulator(
        dtype=dtypes.float32,
        # The stats consist of grads and hessians means only.
        # TODO(nponomareva): this will change for a multiclass
        shape=[2, 1],
        shared_name='bias_accumulator')

    grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
    grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)

    apply_grad = bias_accumulator.apply_grad(grads_and_hess, self._stamp_token)
    bias_dependencies.append(apply_grad)

    # Center bias if enough batches were processed.
    with ops.control_dependencies(bias_dependencies):
      if not self._is_chief:
        return control_flow_ops.no_op()

      def center_bias_from_accumulator():
        accumulated = array_ops.unstack(bias_accumulator.take_grad(1), axis=0)
        return self._center_bias_fn(center_bias_var,
                                    array_ops.expand_dims(accumulated[0], 0),
                                    array_ops.expand_dims(accumulated[1], 0))

      center_bias_op = control_flow_ops.cond(
          math_ops.greater_equal(bias_accumulator.num_accumulated(),
                                 self._n_batches_per_layer),
          center_bias_from_accumulator,
          control_flow_ops.no_op,
          name='wait_until_n_batches_for_bias_accumulated')
      return center_bias_op
Developer: ZhangXinNan, Project: tensorflow, Lines: 37, Source: boosted_trees.py


Example 18: AddBackPropCounterLoop

  def AddBackPropCounterLoop(self, count):
    """Add the backprop loop that controls the iterations.

    This is added to the backprop loop. It is used to control the loop
    termination and the slice index.

    The pseudocode is:
      `n = count; while (n >= 1) { n--; }`

    Args:
      count: The number of iterations for backprop.

    Returns:
      always 0.
    """
    one = constant_op.constant(1, name="b_count")
    self.Enter()
    self.AddName(count.name)
    enter_count = _Enter(count, self._name, is_constant=False,
                         parallel_iterations=self._parallel_iterations,
                         name="b_count")
    merge_count = merge([enter_count, enter_count])[0]
    self._pivot_for_pred = merge_count

    cond = math_ops.greater_equal(merge_count, one)
    self._pivot = loop_cond(cond, name="b_count")
    switch_count = switch(merge_count, self._pivot)

    # Add next_iteration right after Switch to match the gradient function.
    next_count = next_iteration(switch_count[1])
    self._pivot_for_body = next_count
    self._index = math_ops.sub(next_count, one)
    merge_count.op._update_input(1, self._index)

    exit_count = exit(switch_count[0], name="b_count")
    self.Exit()
    return exit_count
Developer: p-zhang, Project: tensorflow, Lines: 37, Source: control_flow_ops.py
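An eager restatement of the pseudocode in plain Python: the loop pivot is exactly greater_equal(n, 1), so the body executes count times before exit.

def backprop_counter_loop(count):
    n = count
    iterations = 0
    while n >= 1:      # cond = greater_equal(merge_count, one)
        n -= 1         # the "n--" from the pseudocode
        iterations += 1
    return iterations

print(backprop_counter_loop(5))  # 5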


Example 19: _GatherDropNegatives

def _GatherDropNegatives(params, ids, zero_clipped_indices=None,
                         is_positive=None):
  """ Helper function for unsorted segment ops. Gathers params for
      positive segment ids and gathers 0 for inputs with negative segment id.
      Also returns the clipped indices and a boolean mask with the same shape
      as ids where a positive id is masked as true. With this, the latter two
      can be passed as arguments to this function to reuse them.
  """
  if zero_clipped_indices is None:
    zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))
  gathered = array_ops.gather(params, zero_clipped_indices)
  if is_positive is None:
    is_positive = math_ops.greater_equal(ids, 0)
    # tf.where(condition, x, y) requires condition to have the same shape as x
    # and y.
    # todo(philjd): remove this if tf.where supports broadcasting (#9284)
    for _ in range(gathered.shape.ndims - is_positive.shape.ndims):
      is_positive = array_ops.expand_dims(is_positive, -1)
    is_positive = (is_positive &
                   array_ops.ones_like(gathered, dtype=dtypes.bool))
  # replace gathered params of negative indices with 0
  zero_slice = array_ops.zeros_like(gathered)
  return (array_ops.where(is_positive, gathered, zero_slice),
          zero_clipped_indices, is_positive)
Developer: neuroradiology, Project: tensorflow, Lines: 24, Source: math_grad.py
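A hedged three-row example with public-API calls showing the effect: ids clipped to zero keep the gather in range, and the boolean mask then zeroes out the rows that came from negative ids.

import tensorflow as tf

params = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
ids = tf.constant([2, -1, 0])

clipped = tf.maximum(ids, tf.zeros_like(ids))   # [2, 0, 0]
gathered = tf.gather(params, clipped)           # rows 2, 0, 0
is_positive = tf.greater_equal(ids, 0)          # [True, False, True]
is_positive = tf.expand_dims(is_positive, -1) & tf.ones_like(gathered, tf.bool)

print(tf.where(is_positive, gathered, tf.zeros_like(gathered)))
# [[5. 6.]
#  [0. 0.]
#  [1. 2.]]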


Example 20: center_bias_not_in_mem

        def center_bias_not_in_mem():
          """Accumulates the data and updates the logits bias, when ready."""
          bias_dependencies = []

          bias_accumulator = data_flow_ops.ConditionalAccumulator(
              dtype=dtypes.float32,
              # The stats consist of grads and hessians means only.
              # TODO(nponomareva): this will change for a multiclass
              shape=[2, 1],
              shared_name='bias_accumulator')

          grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
          grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)

          apply_grad = bias_accumulator.apply_grad(grads_and_hess, stamp_token)
          bias_dependencies.append(apply_grad)

          def center_bias_from_accumulator():
            accumulated = array_ops.unstack(
                bias_accumulator.take_grad(1), axis=0)
            return _center_bias_fn(
                array_ops.expand_dims(accumulated[0], 0),
                array_ops.expand_dims(accumulated[1], 0))

          with ops.control_dependencies(bias_dependencies):
            if config.is_chief:
              center_bias_op = control_flow_ops.cond(
                  math_ops.greater_equal(bias_accumulator.num_accumulated(),
                                         n_batches_per_layer),
                  center_bias_from_accumulator,
                  control_flow_ops.no_op,
                  name='wait_until_n_batches_for_bias_accumulated')

              return center_bias_op
            else:
              return control_flow_ops.no_op()
Developer: Eagle732, Project: tensorflow, Lines: 36, Source: boosted_trees.py



Note: The tensorflow.python.ops.math_ops.greater_equal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not repost without permission.

