Python math_ops.reduce_any Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.reduce_any. If you have been wondering what exactly reduce_any does, how to call it, or what real-world usage looks like, the curated examples below should help.



Below are 20 code examples of the reduce_any function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
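
Before diving into the examples, here is a minimal sketch of the basic call pattern, using the public tf.reduce_any alias of the internal math_ops.reduce_any (TF 2.x eager mode assumed):

import tensorflow as tf

x = tf.constant([[True, False],
                 [False, False]])

tf.reduce_any(x)                          # True: at least one element is True
tf.reduce_any(x, axis=0)                  # [True, False]: reduce over rows
tf.reduce_any(x, axis=1)                  # [True, False]: reduce over columns
tf.reduce_any(x, axis=1, keepdims=True)   # shape (2, 1): keep the reduced axis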

Example 1: testAxesType

 def testAxesType(self):
   for dtype in [dtypes.int64, dtypes.int32]:
     with self.test_session(use_gpu=True) as sess:
       v = math_ops.reduce_any([True, True],
                               constant_op.constant(0, dtype=dtype))
       tf_v = sess.run(v)
     self.assertAllEqual(tf_v, True)
Author: ThunderQi | Project: tensorflow | Lines: 7 | Source: reduction_ops_test.py


Example 2: kl_divergence

def kl_divergence(distribution_a, distribution_b,
                  allow_nan_stats=True, name=None):
  """Get the KL-divergence KL(distribution_a || distribution_b).

  If there is no KL method registered specifically for `type(distribution_a)`
  and `type(distribution_b)`, then the class hierarchies of these types are
  searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(distribution_a)`).

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    A Tensor with the batchwise KL-divergence between `distribution_a`
    and `distribution_b`.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of `distribution_a` and `distribution_b`.
  """
  kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(distribution_a || distribution_b) registered for distribution_a "
        "type %s and distribution_b type %s"
        % (type(distribution_a).__name__, type(distribution_b).__name__))

  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(distribution_a, distribution_b, name=name)
    if allow_nan_stats:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan_stats=False). Values:"
             % (distribution_a.name, distribution_b.name), kl_t])]):
      return array_ops.identity(kl_t, name="checked_kl")
Author: AlbertXiebnu | Project: tensorflow | Lines: 58 | Source: kullback_leibler.py
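
The kl_divergence above lives in TensorFlow's internal distributions module. A hypothetical usage sketch, assuming TensorFlow Probability is installed, where tfp.distributions.kl_divergence is the maintained public counterpart:

import tensorflow_probability as tfp

tfd = tfp.distributions
a = tfd.Normal(loc=0.0, scale=1.0)
b = tfd.Normal(loc=1.0, scale=2.0)

# allow_nan_stats=False enables the reduce_any(is_nan(...)) guard shown above,
# raising if the registered KL computation returns NaN for any batch member.
kl = tfd.kl_divergence(a, b, allow_nan_stats=False)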


Example 3: next_inputs

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = math_ops.cast(sample_ids, dtypes.bool)

      def maybe_sample():
        """Perform scheduled sampling."""

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_

          next_time = time + 1
          auxiliary_inputs = nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
          return nest.map_structure(
              lambda x, y: array_ops.concat((x, y), -1),
              outputs_, auxiliary_inputs)

        if self._next_inputs_fn is None:
          return array_ops.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)

        where_sampling = math_ops.cast(
            array_ops.where(sample_ids), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
        outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
        inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                                  where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_inputs_fn(outputs_sampling), where_sampling)

        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
      next_inputs = control_flow_ops.cond(
          math_ops.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
Author: AnddyWang | Project: tensorflow | Lines: 58 | Source: helper.py
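
The trickiest part of this example is the final scatter/gather recombination: sampled rows and pass-through rows are written into two disjoint scatter_nd outputs and summed. A minimal sketch of that pattern with the public TF 2.x API (all tensor values below are illustrative):

import tensorflow as tf

base = tf.zeros([4, 2])                                  # pass-through inputs
mask = tf.constant([True, False, True, False])           # rows to resample

where_s = tf.cast(tf.where(mask), tf.int32)              # sampled row indices
where_ns = tf.cast(tf.where(tf.logical_not(mask)), tf.int32)

sampled = tf.ones([2, 2])                                # replacement rows
kept = tf.gather_nd(base, where_ns)                      # untouched rows

# Disjoint scatters sum into the merged batch: row i comes from `sampled`
# where mask[i] is True, and from `base` otherwise.
merged = (tf.scatter_nd(where_s, sampled, tf.shape(base)) +
          tf.scatter_nd(where_ns, kept, tf.shape(base)))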


Example 4: _detect_nan_inf

    def _detect_nan_inf(tensor):
      """Trace function for detecting any NaN/Inf in the tensor."""

      if tensor.dtype.is_floating:
        output_tensor = math_ops.reduce_any(
            gen_math_ops.logical_or(
                gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
      else:
        output_tensor = constant_op.constant(False)
      # The shape has to be 1. Set it if it does not have the information.
      output_tensor = array_ops.reshape(output_tensor, [1])
      return output_tensor
Author: Wajih-O | Project: tensorflow | Lines: 12 | Source: tensor_tracer.py
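
The same NaN/Inf detection can be written against the public API; a self-contained sketch (the function name is illustrative, not from tensor_tracer):

import tensorflow as tf

def has_nan_or_inf(tensor):
  """Scalar bool Tensor: True if any element of `tensor` is NaN or Inf."""
  if tensor.dtype.is_floating:
    return tf.reduce_any(tf.math.logical_or(tf.math.is_nan(tensor),
                                            tf.math.is_inf(tensor)))
  # Non-floating tensors cannot hold NaN/Inf.
  return tf.constant(False)

print(has_nan_or_inf(tf.constant([1.0, float("nan")])))  # True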


Example 5: kl

def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  If there is no KL method registered specifically for `type(dist_a)` and
  `type(dist_b)`, then the class hierarchies of these types are searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(dist_a)`).

  Args:
    dist_a: The first distribution.
    dist_b: The second distribution.
    allow_nan: If `False` (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions.  If `True`, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  kl_fn = _registered_kl(type(dist_a), type(dist_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % ((type(dist_a).__name__, type(dist_b).__name__)))

  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t

    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")

    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False).  Values:"
             % (dist_a.name, dist_b.name), kl_t])]):
      return array_ops.identity(kl_t, name="checked_kl")
Author: AliMiraftab | Project: tensorflow | Lines: 53 | Source: kullback_leibler.py


Example 6: _detect_nan_inf

    def _detect_nan_inf(tensor):
      """Trace function for detecting any NaN/Inf in the tensor."""

      if tensor.dtype.is_floating:
        # Since host can't handle bf16, always convert tensor to f32.
        tensor = math_ops.cast(tensor, dtypes.float32)
        output_tensor = math_ops.reduce_any(
            gen_math_ops.logical_or(gen_math_ops.is_nan(tensor),
                                    gen_math_ops.is_inf(tensor)))
      else:
        output_tensor = constant_op.constant(0)
      return _print_tensor(op_name, output_idx, 1, tensor, output_tensor)
Author: Ajaycs99 | Project: tensorflow | Lines: 12 | Source: tensor_tracer.py


Example 7: _compare

 def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
   np_ans = x
   if reduction_axes is None:
     np_ans = np.any(np_ans, keepdims=keep_dims)
   else:
     for ra in reduction_axes[::-1]:
       np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
   with self.test_session(use_gpu=use_gpu):
     if reduction_axes is not None:
       reduction_axes = np.array(reduction_axes).astype(np.int32)
     tf_ans = math_ops.reduce_any(x, reduction_axes, keep_dims)
     out = tf_ans.eval()
   self.assertAllEqual(np_ans, out)
   self.assertShapeEqual(np_ans, tf_ans)
Author: Immexxx | Project: tensorflow | Lines: 14 | Source: reduction_ops_test.py
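
This test mirrors reduce_any against np.any. The same parity holds in eager mode with the current keepdims spelling (the listing's keep_dims is the deprecated TF1 name); a quick sketch:

import numpy as np
import tensorflow as tf

x = np.random.rand(3, 4, 5) > 0.5
np_ans = np.any(x, axis=(0, 2), keepdims=True)
tf_ans = tf.reduce_any(x, axis=(0, 2), keepdims=True)

assert np.array_equal(np_ans, tf_ans.numpy())  # identical values and shape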


Example 8: kl

def kl(dist_a, dist_b, allow_nan=False, name=None):
    """Get the KL-divergence KL(dist_a || dist_b).

  Args:
    dist_a: instance of distributions.Distribution.
    dist_b: instance of distributions.Distribution.
    allow_nan: If False (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions.  If True, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    TypeError: If dist_a or dist_b is not an instance of Distribution.
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
    if not isinstance(dist_a, distribution.Distribution):
        raise TypeError("dist_a is not an instance of Distribution, received type: %s" % type(dist_a))
    if not isinstance(dist_b, distribution.Distribution):
        raise TypeError("dist_b is not an instance of Distribution, received type: %s" % type(dist_b))
    kl_fn = _DIVERGENCES.get((type(dist_a), type(dist_b)), None)
    if kl_fn is None:
        raise NotImplementedError(
            "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
            "type %s" % ((type(dist_a).__name__, type(dist_b).__name__))
        )
    with ops.name_scope("KullbackLeibler"):
        kl_t = kl_fn(dist_a, dist_b, name=name)
        if allow_nan:
            return kl_t

        # Check KL for NaNs
        kl_t = array_ops.identity(kl_t, name="kl")

        with ops.control_dependencies(
            [
                logging_ops.Assert(
                    math_ops.logical_not(math_ops.reduce_any(math_ops.is_nan(kl_t))),
                    [
                        "KL calculation between %s and %s returned NaN values "
                        "(and was called with allow_nan=False).  Values:" % (dist_a.name, dist_b.name),
                        kl_t,
                    ],
                )
            ]
        ):
            return array_ops.identity(kl_t, name="checked_kl")
Author: ChaitanyaCixLive | Project: tensorflow | Lines: 50 | Source: kullback_leibler.py


Example 9: _process_scale

  def _process_scale(self, scale, event_ndims):
    """Helper to __init__ which gets scale in batch-ready form.

    This function expands dimensions of `scale` according to the following
    table:
                     event_ndims
    scale.ndims   0            1
              0  [1]+S+[1,1]   "silent error"
              1  [ ]+S+[1,1]   "silent error"
              2  [ ]+S+[1,1]   [1]+S+[ ]
              3  [ ]+S+[1,1]   [ ]+S+[ ]
            ...  (same)        (same)

    The idea is that we want to convert `scale` into something which can always
    work for, say, the left-hand argument of `batch_matmul`.

    Args:
      scale: `Tensor`.
      event_ndims: `Tensor` (0D, `int32`).

    Returns:
      scale: `Tensor` with dims expanded according to [above] table.
      batch_ndims: `Tensor` (0D, `int32`).  The ndims of the `batch` portion.
    """
    ndims = array_ops.rank(scale)
    left = math_ops.select(
        math_ops.reduce_any([
            math_ops.reduce_all([
                math_ops.equal(ndims, 0),
                math_ops.equal(event_ndims, 0)
            ]),
            math_ops.reduce_all([
                math_ops.equal(ndims, 2),
                math_ops.equal(event_ndims, 1)
            ])]), 1, 0)
    right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
    pad = array_ops.concat(0, (
        array_ops.ones([left], dtype=dtypes.int32),
        array_ops.shape(scale),
        array_ops.ones([right], dtype=dtypes.int32)))
    scale = array_ops.reshape(scale, pad)
    batch_ndims = ndims - 2 + right
    return scale, batch_ndims
Author: KalraA | Project: tensorflow | Lines: 43 | Source: bijector.py


Example 10: _check_weights_match_logits_and_reshape

def _check_weights_match_logits_and_reshape(weights, logits):
  """Checks that weights shape matches logits and reshapes if needed.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Weights shape
  can be either:
  * [D0, D1, ... DN, logits_dimension]
  * [D0, D1, ... DN]: In this case, weights is reshaped into
    [D0, D1, ... DN, 1] to work with weight broadcasting rules.

  Args:
    weights: weights Tensor.
    logits: logits Tensor.
  Returns:
    Validated and reshaped weights Tensor.
  """
  err_msg = (
      'weights shape must be [D0, D1, ... DN], [D0, D1, ... DN, 1] or '
      '[D0, D1, ... DN, logits_dimension]')
  with ops.name_scope(None, 'weights', (weights, logits)) as scope:
    weights_shape = array_ops.shape(weights, name='weights_shape')
    logits_shape = array_ops.shape(logits, name='logits_shape')
    if (weights.shape.ndims is not None and logits.shape.ndims is not None and
        weights.shape.ndims == logits.shape.ndims - 1):
      assert_dimension = check_ops.assert_equal(
          logits_shape[:-1], weights_shape, message=err_msg,
          data=['logits_shape: ', logits_shape,
                'weights_shape: ', weights_shape])
      with ops.control_dependencies([assert_dimension]):
        return array_ops.expand_dims(weights, -1, name=scope)
    supported_weights_shape = array_ops.concat([logits_shape[:-1], [1]], axis=0)
    condition = math_ops.reduce_any(
        [math_ops.reduce_all(math_ops.equal(logits_shape, weights_shape)),
         math_ops.reduce_all(math_ops.equal(
             supported_weights_shape, weights_shape))])
    assert_dimension = control_flow_ops.Assert(
        condition=condition,
        data=[err_msg, 'logits_shape: ', logits_shape,
              'weights_shape: ', weights_shape])
    with ops.control_dependencies([assert_dimension]):
      return array_ops.identity(weights, name=scope)
Author: marcomarchesi | Project: tensorflow | Lines: 40 | Source: head.py
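
The validation above stacks two reduce_all results and lets reduce_any accept either shape pattern. The idiom in isolation, with illustrative shapes:

import tensorflow as tf

logits_shape = tf.constant([2, 3, 4])
weights_shape = tf.constant([2, 3, 1])
supported_weights_shape = tf.concat([logits_shape[:-1], [1]], axis=0)

# True if weights match either the full logits shape or [D0, ..., DN, 1].
condition = tf.reduce_any(
    [tf.reduce_all(tf.equal(logits_shape, weights_shape)),
     tf.reduce_all(tf.equal(supported_weights_shape, weights_shape))])
# condition == True here: weights match the [D0, D1, 1] pattern.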


Example 11: _assert_sparse_indices_are_ragged_right

def _assert_sparse_indices_are_ragged_right(indices):
  """Checks that the given SparseTensor.indices tensor is ragged-right.

  Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
  because the entry `[3, 1]` skips a cell.

  Args:
    indices: The SparseTensor indices to check.

  Returns:
    A list of control dependency op tensors.
  """
  index_prefix = indices[:, :-1]
  index_suffix = indices[:, -1]

  # Check whether each index is starting a new row in the innermost dimension
  # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
  # (Note: this skips the first index; we will check that separately below.)
  index_prefix_changed = math_ops.reduce_any(
      math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)

  # Check two cases:
  #   * For indices that start a new row: index_suffix[i] must be zero.
  #   * For indices that continue a row: index_suffix[i] must be equal to
  #     index_suffix[i-1]+1.
  index_ok = array_ops.where(
      index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
      math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))

  # Also check that the very first index didn't skip any cells.  The first
  # index starts a new row (by definition), so its suffix should be zero.
  sparse_indices_are_ragged_right = math_ops.logical_and(
      math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
      math_ops.reduce_all(index_ok))

  message = [
      'SparseTensor is not right-ragged',
      'SparseTensor.indices =', indices
  ]
  return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
Author: abhinav-upadhyay | Project: tensorflow | Lines: 40 | Source: ragged_conversion_ops.py
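
To see the prefix-change test in isolation, here is a small eager sketch using the docstring's own (not ragged-right) indices:

import tensorflow as tf

indices = tf.constant([[0, 0], [0, 1], [2, 0], [3, 1]])
index_prefix = indices[:, :-1]

# True where row i starts a new outer row (its prefix differs from row i-1).
index_prefix_changed = tf.reduce_any(
    tf.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
# -> [False, True, True]: rows [2, 0] and [3, 1] start new rows, so their
# suffixes must be 0; [3, 1] has suffix 1, hence the assertion would fire.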


Example 12: get_cluster_assignment

def get_cluster_assignment(pairwise_distances, centroid_ids):
  """Assign data points to the neareset centroids.

  Tensorflow has numerical instability and doesn't always choose
    the data point with theoretically zero distance as it's nearest neighbor.
    Thus, for each centroid in centroid_ids, explicitly assign
    the centroid itself as the nearest centroid.
    This is done through the mask tensor and the constraint_vect tensor.

  Args:
    pairwise_distances: 2-D Tensor of pairwise distances.
    centroid_ids: 1-D Tensor of centroid indices.

  Returns:
    y_fixed: 1-D tensor of cluster assignment.
  """
  predictions = math_ops.argmin(
      array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
  batch_size = array_ops.shape(pairwise_distances)[0]

  # Deal with numerical instability
  mask = math_ops.reduce_any(array_ops.one_hot(
      centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
                             axis=0)
  constraint_one_hot = math_ops.multiply(
      array_ops.one_hot(centroid_ids,
                        batch_size,
                        array_ops.constant(1, dtype=dtypes.int64),
                        array_ops.constant(0, dtype=dtypes.int64),
                        axis=0,
                        dtype=dtypes.int64),
      math_ops.cast(math_ops.range(array_ops.shape(centroid_ids)[0]),
                    dtypes.int64))
  constraint_vect = math_ops.reduce_sum(
      array_ops.transpose(constraint_one_hot), axis=0)

  y_fixed = array_ops.where(mask, constraint_vect, predictions)
  return y_fixed
Author: Albert-Z-Guo | Project: tensorflow | Lines: 38 | Source: metric_loss_ops.py


Example 13: insert

  def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with ops.control_dependencies(self.last_ops):
      scatter_op = state_ops.scatter_update(self.id_to_score, ids, scores)
      larger_scores = math_ops.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = array_ops.boolean_mask(
            math_ops.to_int64(ids), larger_scores)
        larger_score_values = array_ops.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = tensor_forest_ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = state_ops.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = state_ops.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return control_flow_ops.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = control_flow_ops.cond(
          math_ops.reduce_any(larger_scores), shortlist_insert,
          control_flow_ops.no_op)
      with ops.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
Author: AliMiraftab | Project: tensorflow | Lines: 23 | Source: topn.py
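
The cond/reduce_any gate used here is a common pattern: skip a stateful or expensive branch entirely when no element qualifies. A minimal sketch with illustrative values:

import tensorflow as tf

scores = tf.constant([0.1, 0.7, 0.3])
larger_scores = tf.greater(scores, 0.5)

# Only runs the masking branch when at least one score beats the threshold.
result = tf.cond(tf.reduce_any(larger_scores),
                 lambda: tf.boolean_mask(scores, larger_scores),
                 lambda: tf.zeros([0]))   # empty no-op result, same dtype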


Example 14: terminate_when_all_zero

 def terminate_when_all_zero(current_argument, residual_powers, accumulator):
   del current_argument, accumulator  # not used for condition
   do_exit = math_ops.reduce_any(
       math_ops.greater(residual_powers, array_ops.ones_like(residual_powers)))
   return do_exit
Author: AutumnQYN | Project: tensorflow | Lines: 5 | Source: math_utils.py
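
reduce_any is the natural exit test for tf.while_loop: keep iterating while any element still violates the stopping condition. A self-contained sketch of the same termination logic (names and values are illustrative):

import tensorflow as tf

def keep_going(step, powers):
  # Same test as above: loop while any residual power is still > 1.
  return tf.reduce_any(tf.greater(powers, tf.ones_like(powers)))

def halve(step, powers):
  return step + 1, powers / 2.0

steps, final = tf.while_loop(keep_going, halve,
                             (tf.constant(0), tf.constant([8.0, 1.0, 4.0])))
# steps == 3, final == [1.0, 0.125, 0.5]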


Example 15: test

 def test(self):
   result_lt = ops.reduce_any(self.bool_lt, {'channel'})
   golden_lt = core.LabeledTensor(
       math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
   self.assertLabeledTensorsEqual(result_lt, golden_lt)
Author: AliMiraftab | Project: tensorflow | Lines: 5 | Source: ops_test.py


Example 16: build_controller

  def build_controller(self):
    """RL optimization interface.

    Returns:
      ops: A dictionary holding handles of the model used for training.
    """

    self._global_step = training_util.get_or_create_global_step()
    ops = {}
    ops["loss"] = 0

    failing_signal = self.compute_reward(self.hparams.failing_signal)

    ctr = {}

    with tf_ops.name_scope("controller_{}".format(self.ctrl_id)):
      with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
        ctr["reward"] = {"value": [], "ph": [], "update": []}
        ctr["ready"] = {"value": [], "ph": [], "update": []}
        ctr["best_reward"] = {"value": [], "update": []}
        for i in range(self.hparams.num_children):
          reward_value = variable_scope.get_local_variable(
              "reward_{}".format(i),
              initializer=0.0,
              dtype=dtypes.float32,
              trainable=False)
          reward_ph = array_ops.placeholder(
              dtypes.float32, shape=(), name="reward_ph_{}".format(i))
          reward_update = state_ops.assign(
              reward_value, reward_ph, use_locking=True)
          ctr["reward"]["value"].append(reward_value)
          ctr["reward"]["ph"].append(reward_ph)
          ctr["reward"]["update"].append(reward_update)
          best_reward = variable_scope.get_local_variable(
              "best_reward_{}".format(i),
              initializer=failing_signal,
              dtype=dtypes.float32,
              trainable=False)
          ctr["best_reward"]["value"].append(best_reward)
          ctr["best_reward"]["update"].append(
              state_ops.assign(best_reward,
                               math_ops.minimum(best_reward, reward_update)))

          ready_value = variable_scope.get_local_variable(
              "ready_{}".format(i),
              initializer=True,
              dtype=dtypes.bool,
              trainable=False)
          ready_ph = array_ops.placeholder(
              dtypes.bool, shape=(), name="ready_ph_{}".format(i))
          ready_update = state_ops.assign(
              ready_value, ready_ph, use_locking=True)
          ctr["ready"]["value"].append(ready_value)
          ctr["ready"]["ph"].append(ready_ph)
          ctr["ready"]["update"].append(ready_update)

      ctr["grouping_y_preds"], ctr["grouping_log_probs"] = self.get_groupings()
      summary.histogram(
          "grouping_actions",
          array_ops.slice(ctr["grouping_y_preds"]["sample"], [0, 0],
                          [1, array_ops.shape(self.op_embeddings)[0]]))

      with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
        ctr["baseline"] = variable_scope.get_local_variable(
            "baseline",
            initializer=failing_signal
            if self.hparams.start_with_failing_signal else 0.0,
            dtype=dtypes.float32,
            trainable=False)

      new_baseline = self.hparams.bl_dec * ctr["baseline"] + (
          1 - self.hparams.bl_dec) * math_ops.reduce_mean(
              ctr["reward"]["value"])
      if not self.hparams.always_update_baseline:
        baseline_mask = math_ops.less(ctr["reward"]["value"], failing_signal)
        selected_reward = array_ops.boolean_mask(ctr["reward"]["value"],
                                                 baseline_mask)
        selected_baseline = control_flow_ops.cond(
            math_ops.reduce_any(baseline_mask),
            lambda: math_ops.reduce_mean(selected_reward),
            lambda: constant_op.constant(0, dtype=dtypes.float32))
        ctr["pos_reward"] = selected_baseline
        pos_ = math_ops.less(
            constant_op.constant(0, dtype=dtypes.float32), selected_baseline)
        selected_baseline = self.hparams.bl_dec * ctr["baseline"] + (
            1 - self.hparams.bl_dec) * selected_baseline
        selected_baseline = control_flow_ops.cond(
            pos_, lambda: selected_baseline, lambda: ctr["baseline"])
        new_baseline = control_flow_ops.cond(
            math_ops.less(self.global_step,
                          self.hparams.stop_updating_after_steps),
            lambda: new_baseline, lambda: selected_baseline)
      ctr["baseline_update"] = state_ops.assign(
          ctr["baseline"], new_baseline, use_locking=True)

      ctr["y_preds"], ctr["log_probs"] = self.get_placements()
      summary.histogram("actions", ctr["y_preds"]["sample"])
      mask = math_ops.less(ctr["reward"]["value"], failing_signal)
      ctr["loss"] = ctr["reward"]["value"] - ctr["baseline"]
      ctr["loss"] *= (
#......... (remaining code omitted) .........
Author: neuroradiology | Project: tensorflow | Lines: 101 | Source: hierarchical_controller.py


Example 17: _initialized_limit_check

 def _initialized_limit_check():
   return control_flow_ops.cond(
       math_ops.reduce_any(non_decreasing),
       lambda: state_ops.assign(discarded_windows_limiter, 0),
       lambda: discarded_windows_limiter.count_up_to(self._discard_limit))
Author: Ajaycs99 | Project: tensorflow | Lines: 5 | Source: input_pipeline.py


Example 18: _cond

 def _cond(unused_chosen_inputs, running_products, unused_idx, unused_count):
   """Resampling loop exit condition."""
   return math_ops.reduce_any(running_products > floor_vals)
Author: ComeOnGetMe | Project: tensorflow | Lines: 3 | Source: resample.py


Example 19: _get_weights_and_check_match_logits

def _get_weights_and_check_match_logits(
    features, weight_column, logits, allow_per_logit_weights=False):
  """Fetches weights from features and checks that the shape matches logits.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Weights shape
  can be either:
  * [D0, D1, ... DN, logits_dimension] if `allow_per_logit_weights=True`.
  * [D0, D1, ... DN, 1]
  * [D0, D1, ... DN]: In this case, weights is reshaped into
    [D0, D1, ... DN, 1] to work with weight broadcasting rules.

  Args:
    features: The features dict that contains weights.
    weight_column: The weight column. If not given, this method returns 1.
    logits: logits Tensor.
    allow_per_logit_weights: Boolean. Whether we allow weights along the logits
      dimension, namely shape `[D0, D1, ... DN, logits_dimension]`.
  Returns:
    Validated and reshaped weights Tensor.
  Raises:
    ValueError: If the weights `Tensor` cannot be cast into float.
  """
  if allow_per_logit_weights:
    err_msg = (
        'weights shape must be [D0, D1, ... DN], [D0, D1, ... DN, 1] or '
        '[D0, D1, ... DN, logits_dimension]')
  else:
    err_msg = (
        'weights shape must be [D0, D1, ... DN] or [D0, D1, ... DN, 1]')
  with ops.name_scope(
      None, 'weights',
      values=tuple(six.itervalues(features)) + (logits,)) as scope:
    # Fetch the weights.
    if weight_column is None:
      return 1.
    if isinstance(weight_column, six.string_types):
      weight_column = feature_column_lib.numeric_column(
          key=weight_column, shape=(1,))
    if not isinstance(weight_column, feature_column_lib._NumericColumn):  # pylint: disable=protected-access
      raise TypeError('Weight column must be either a string or _NumericColumn.'
                      ' Given type: {}.'.format(type(weight_column)))
    weights = weight_column._get_dense_tensor(  # pylint: disable=protected-access
        feature_column_lib._LazyBuilder(features))  # pylint: disable=protected-access
    if not (weights.dtype.is_floating or weights.dtype.is_integer):
      raise ValueError('Weight column should be castable to float. '
                       'Given dtype: {}'.format(weights.dtype))
    weights = math_ops.to_float(weights, name='weights')

    # Validate the weights shape.
    weights_shape = array_ops.shape(weights, name='weights_shape')
    logits_shape = array_ops.shape(logits, name='logits_shape')
    if (weights.shape.ndims is not None and logits.shape.ndims is not None and
        weights.shape.ndims == logits.shape.ndims - 1):
      assert_dimension = check_ops.assert_equal(
          logits_shape[:-1], weights_shape, message=err_msg,
          data=['logits_shape: ', logits_shape,
                'weights_shape: ', weights_shape])
      with ops.control_dependencies([assert_dimension]):
        return array_ops.expand_dims(weights, -1, name=scope)
    supported_weights_shape = array_ops.concat([logits_shape[:-1], [1]], axis=0)
    if allow_per_logit_weights:
      condition = math_ops.reduce_any(
          [math_ops.reduce_all(math_ops.equal(logits_shape, weights_shape)),
           math_ops.reduce_all(math_ops.equal(
               supported_weights_shape, weights_shape))])
      assert_dimension = control_flow_ops.Assert(
          condition=condition,
          data=[err_msg, 'logits_shape: ', logits_shape,
                'weights_shape: ', weights_shape])
    else:
      assert_dimension = check_ops.assert_equal(
          supported_weights_shape, weights_shape, message=err_msg,
          data=['logits_shape: ', logits_shape,
                'weights_shape: ', weights_shape])
    with ops.control_dependencies([assert_dimension]):
      return array_ops.identity(weights, name=scope)
Author: AbhinavJain13 | Project: tensorflow | Lines: 76 | Source: head.py


Example 20: get_next

  def get_next(self, name=None):
    """Returns the next input from the iterator for all replicas."""
    if not self._enable_get_next_as_optional:
      replicas = []
      for i, worker in enumerate(self._input_workers.worker_devices):
        if name is not None:
          d = tf_device.DeviceSpec.from_string(worker)
          new_name = "%s_%s_%d" % (name, d.job, d.task)
        else:
          new_name = None
        with ops.device(worker):
          # Make `replicas` a flat list of values across all replicas.
          replicas.extend(
              self._iterators[i].get_next_as_list_deprecated(new_name))
      return values.regroup(self._input_workers.device_map, replicas)

    replicas = []
    worker_has_values = []
    for i, worker in enumerate(self._input_workers.worker_devices):
      if name is not None:
        d = tf_device.DeviceSpec.from_string(worker)
        new_name = "%s_%s_%d" % (name, d.job, d.task)
      else:
        new_name = None
      with ops.device(worker):
        worker_has_value, next_element = (
            self._iterators[i].get_next_as_list(new_name))
        worker_has_values.append(worker_has_value)
        # Make `replicas` a flat list of values across all replicas.
        replicas.append(next_element)

    out_of_range_replicas = []

    def out_of_range_fn(worker_index, device):
      """This function will throw an OutOfRange error."""
      # Since this is only called when there is no data left, calling
      # get_next() will trigger an OutOfRange error.
      data = self._iterators[worker_index].get_next(device)
      out_of_range_replicas.append(data)
      return data

    # `global_has_value` indicates whether there is data in this global batch.
    # We do an all-reduce across all the workers in the multi-worker case.
    # TODO(b/126259107): Do strategy.reduce for CollectiveAllReduceStrategy.
    if len(worker_has_values) > 1:
      with ops.device(self._input_workers.compute_devices_for_worker(0)[0]):
        # Place the tf.reduce_any op on device 0 to minimize communication
        # cost.
        # TODO(b/128545270): Investigate why placing it on worker 0 will cause
        # the entire data to copy back from device to host.
        global_has_value = math_ops.reduce_any(worker_has_values)
    else:
      global_has_value = worker_has_values[0]

    results = []
    for i, worker in enumerate(self._input_workers.worker_devices):
      with ops.device(worker):
        devices = self._input_workers.compute_devices_for_worker(i)
        for j, device in enumerate(devices):
          with ops.device(device):
            # pylint: disable=undefined-loop-variable
            # pylint: disable=cell-var-from-loop
            # It is fine for the lambda to capture variables from the loop as
            # the lambda is executed in the loop as well.
            result = control_flow_ops.cond(global_has_value,
                                           lambda: replicas[i][j],
                                           lambda: out_of_range_fn(i, device))
            # pylint: enable=cell-var-from-loop
            # pylint: enable=undefined-loop-variable
            results.append(result)
    replicas = results

    # Some dimensions in `replicas` will become unknown after we conditionally
    # return the real tensors or the dummy tensors. We fix the input shapes by
    # using the shapes from `out_of_range_replicas` because it is calling
    # get_next() inside.
    flattened_replicas = nest.flatten(replicas)
    for i, replica_data in enumerate(nest.flatten(out_of_range_replicas)):
      flattened_replicas[i].set_shape(replica_data.get_shape())
    replicas = nest.pack_sequence_as(replicas, flattened_replicas)

    return values.regroup(self._input_workers.device_map, replicas)
Author: adit-chandra | Project: tensorflow | Lines: 82 | Source: input_lib.py



Note: The tensorflow.python.ops.math_ops.reduce_any examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow each project's license. Please do not repost without permission.

