
Python math_ops.argmax Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.argmax. If you are wondering how argmax is used in Python, how the argmax function works in practice, or where to find concrete examples of it, the curated code samples below may help.



A total of 20 code examples of the argmax function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
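
Before looking at the examples, here is a minimal sketch of what argmax computes. It uses the public tf.argmax (which wraps math_ops.argmax); the tensor values are made up purely for illustration:

import tensorflow as tf

# A 2x3 matrix of scores; the values are only illustrative.
scores = tf.constant([[4, 1, 7],
                      [3, 2, 4]])

# Index of the largest entry along each column (axis=0) and each row (axis=1).
print(tf.argmax(scores, axis=0))  # -> [0, 1, 0]
print(tf.argmax(scores, axis=1))  # -> [2, 2]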

Example 1: testArgMinMax

  def testArgMinMax(self):
    for dtype in self.numeric_types:
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmax(x, axis=0, output_type=dtypes.int32),
          np.array([1, 10, 27, 3, 3, 4], dtype=dtype),
          expected=np.int32(2))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmax(x, axis=0, output_type=dtypes.int32),
          np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
          expected=np.array([0, 1, 0], dtype=np.int32))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmax(x, axis=1, output_type=dtypes.int32),
          np.array([[4, 1], [3, 2]], dtype=dtype),
          expected=np.array([0, 0], dtype=np.int32))

      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmin(x, axis=0, output_type=dtypes.int32),
          np.array([3, 10, 27, 3, 2, 4], dtype=dtype),
          expected=np.int32(4))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmin(x, axis=0, output_type=dtypes.int32),
          np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
          expected=np.array([1, 0, 1], dtype=np.int32))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmin(x, axis=1, output_type=dtypes.int32),
          np.array([[4, 1], [3, 2]], dtype=dtype),
          expected=np.array([1, 1], dtype=np.int32))
Developer ID: Crazyonxh, Project: tensorflow, Lines of code: 27, Source: argminmax_test.py


Example 2: call

  def call(self, labels, predictions, weights=None):
    """Accumulate accuracy statistics.

    `labels` and `predictions` should have the same shape.
    Since argmax is applied here, the dtypes of `labels` and
    `predictions` may differ.

    Args:
      labels: One-hot Tensor.
      predictions: Tensor with the logits or probabilities for each example.
      weights: Optional weighting of each example. Defaults to 1.

    Returns:
      The arguments, for easy chaining.
    """
    check_ops.assert_equal(
        array_ops.shape(labels), array_ops.shape(predictions),
        message="Shapes of labels and predictions are unequal")
    labels = math_ops.argmax(labels, axis=-1)
    predictions = math_ops.argmax(predictions, axis=-1)
    matches = math_ops.equal(labels, predictions)
    matches = math_ops.cast(matches, self.dtype)
    super(CategoricalAccuracy, self).call(matches, weights=weights)
    if weights is None:
      return labels, predictions
    return labels, predictions, weights
Developer ID: JonathanRaiman, Project: tensorflow, Lines of code: 26, Source: metrics_impl.py
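
As a rough illustration of the comparison performed above (not the metric class itself), the following hedged sketch shows how one-hot labels and logits are reduced to class ids with argmax and then matched; all tensor values are made up:

import tensorflow as tf

labels = tf.constant([[0., 1., 0.],
                      [1., 0., 0.]])      # one-hot labels
logits = tf.constant([[0.1, 2.0, 0.3],
                      [0.2, 0.5, 1.7]])   # model outputs

label_ids = tf.argmax(labels, axis=-1)    # -> [1, 0]
pred_ids = tf.argmax(logits, axis=-1)     # -> [1, 2]
matches = tf.cast(tf.equal(label_ids, pred_ids), tf.float32)  # -> [1., 0.]
accuracy = tf.reduce_mean(matches)        # -> 0.5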


Example 3: testArgMinMax

  def testArgMinMax(self):
    # Complex numbers do not support argmin/argmax.
    minmax_types = set(self.numeric_types) - set(self.complex_types)
    for dtype in minmax_types:
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmax(x, axis=0, output_type=dtypes.int32),
          np.array([1, 10, 27, 3, 3, 4], dtype=dtype),
          expected=np.int32(2))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmax(x, axis=0, output_type=dtypes.int32),
          np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
          expected=np.array([0, 1, 0], dtype=np.int32))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmax(x, axis=1, output_type=dtypes.int32),
          np.array([[4, 1], [3, 2]], dtype=dtype),
          expected=np.array([0, 0], dtype=np.int32))

      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmin(x, axis=0, output_type=dtypes.int32),
          np.array([3, 10, 27, 3, 2, 4], dtype=dtype),
          expected=np.int32(4))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmin(x, axis=0, output_type=dtypes.int32),
          np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
          expected=np.array([1, 0, 1], dtype=np.int32))
      self._assertOpOutputMatchesExpected(
          lambda x: math_ops.argmin(x, axis=1, output_type=dtypes.int32),
          np.array([[4, 1], [3, 2]], dtype=dtype),
          expected=np.array([1, 1], dtype=np.int32))
Developer ID: AbhinavJain13, Project: tensorflow, Lines of code: 29, Source: argminmax_test.py


Example 4: _logits_to_prediction

  def _logits_to_prediction(self, logits=None):
    predictions = {}
    # Workaround for argmax dropping the second dimension.
    predictions[PedictionKey.LOGITS] = array_ops.expand_dims(
        math_ops.argmax(logits, 1), 1)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[PedictionKey.CLASSES] = array_ops.expand_dims(
        math_ops.argmax(logits, 1), 1)

    return predictions
Developer ID: MrCrumpets, Project: tensorflow, Lines of code: 10, Source: head.py


Example 5: _convert_to_estimator_model_result

 def _convert_to_estimator_model_result(self, logits_fn_result):
   logits, loss, train_op = logits_fn_result
   return {
       Classifier.CLASS_OUTPUT:
           math_ops.argmax(logits, len(logits.get_shape()) - 1),
       Classifier.PROBABILITY_OUTPUT: nn.softmax(logits)
   }, loss, train_op
Developer ID: ComeOnGetMe, Project: tensorflow, Lines of code: 7, Source: classifier.py


Example 6: mode

 def mode(self, name="mode"):
   with ops.name_scope(self.name):
     with ops.op_scope([], name):
       ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
       ret = math_ops.cast(ret, self._dtype)
       ret.set_shape(self.get_batch_shape())
       return ret
Developer ID: 363158858, Project: tensorflow, Lines of code: 7, Source: categorical.py


Example 7: _multi_value_predictions

def _multi_value_predictions(
    activations, target_column, problem_type, predict_probabilities):
  """Maps `activations` from the RNN to predictions for multi value models.

  If `predict_probabilities` is `False`, this function returns a `dict`
  containing a single entry with key `prediction_key.PredictionKey.CLASSES` for
  `problem_type` `ProblemType.CLASSIFICATION` or
  `prediction_key.PredictionKey.SCORE` for `problem_type`
  `ProblemType.LINEAR_REGRESSION`.

  If `predict_probabilities` is `True`, it will contain a second entry with key
  `prediction_key.PredictionKey.PROBABILITIES`. The
  value of this entry is a `Tensor` of probabilities with shape
  `[batch_size, padded_length, num_classes]`.

  Note that variable length inputs will yield some predictions that don't have
  meaning. For example, if `sequence_length = [3, 2]`, then prediction `[1, 2]`
  has no meaningful interpretation.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
    problem_type: Either `ProblemType.CLASSIFICATION` or
      `ProblemType.LINEAR_REGRESSION`.
    predict_probabilities: A Python boolean, indicating whether probabilities
      should be returned. Should only be set to `True` for
      classification/logistic regression problems.
  Returns:
    A `dict` mapping strings to `Tensors`.
  """
  with ops.name_scope('MultiValuePrediction'):
    activations_shape = array_ops.shape(activations)
    flattened_activations = array_ops.reshape(activations,
                                              [-1, activations_shape[2]])
    prediction_dict = {}
    if predict_probabilities:
      flat_probabilities = target_column.logits_to_predictions(
          flattened_activations, proba=True)
      flat_predictions = math_ops.argmax(flat_probabilities, 1)
      if target_column.num_label_columns == 1:
        probability_shape = array_ops.concat([activations_shape[:2], [2]], 0)
      else:
        probability_shape = activations_shape
      probabilities = array_ops.reshape(
          flat_probabilities, probability_shape,
          name=prediction_key.PredictionKey.PROBABILITIES)
      prediction_dict[
          prediction_key.PredictionKey.PROBABILITIES] = probabilities
    else:
      flat_predictions = target_column.logits_to_predictions(
          flattened_activations, proba=False)
    predictions_name = (prediction_key.PredictionKey.CLASSES
                        if problem_type == constants.ProblemType.CLASSIFICATION
                        else prediction_key.PredictionKey.SCORES)
    predictions = array_ops.reshape(
        flat_predictions, [activations_shape[0], activations_shape[1]],
        name=predictions_name)
    prediction_dict[predictions_name] = predictions
    return prediction_dict
Developer ID: arnonhongklay, Project: tensorflow, Lines of code: 60, Source: state_saving_rnn_estimator.py


Example 8: _logits_to_prediction

  def _logits_to_prediction(self, logits=None):
    predictions = {}
    predictions[PredictionKey.LOGITS] = logits
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)

    return predictions
Developer ID: caikehe, Project: tensorflow, Lines of code: 7, Source: head.py


Example 9: _single_value_predictions

def _single_value_predictions(activations, sequence_length, target_column, predict_probabilities):
    """Maps `activations` from the RNN to predictions for single value models.

  If `predict_probabilities` is `False`, this function returns a `dict`
  containing a single entry with key `PREDICTIONS_KEY`. If `predict_probabilities`
  is `True`, it will contain a second entry with key `PROBABILITIES_KEY`. The
  value of this entry is a `Tensor` of probabilities with shape
  `[batch_size, num_classes]`.

  Args:
    activations: Output from an RNN. Should have dtype `float32` and shape
      `[batch_size, padded_length, ?]`.
    sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
      containing the length of each sequence in the batch. If `None`, sequences
      are assumed to be unpadded.
    target_column: An initialized `TargetColumn`, used to calculate predictions.
    predict_probabilities: A Python boolean, indicating whether probabilities
      should be returned. Should only be set to `True` for
      classification/logistic regression problems.
  Returns:
    A `dict` mapping strings to `Tensors`.
  """
    with ops.name_scope("SingleValuePrediction"):
        last_activations = select_last_activations(activations, sequence_length)
        if predict_probabilities:
            probabilities = target_column.logits_to_predictions(last_activations, proba=True)
            prediction_dict = {
                RNNKeys.PROBABILITIES_KEY: probabilities,
                RNNKeys.PREDICTIONS_KEY: math_ops.argmax(probabilities, 1),
            }
        else:
            predictions = target_column.logits_to_predictions(last_activations, proba=False)
            prediction_dict = {RNNKeys.PREDICTIONS_KEY: predictions}
        return prediction_dict
Developer ID: kdavis-mozilla, Project: tensorflow, Lines of code: 34, Source: dynamic_rnn_estimator.py


Example 10: __call__

  def __call__(self, inputs, state, scope=None):
    """Build the CrfDecodeForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous step's
            score values.
      scope: Unused variable scope of this cell.

    Returns:
      backpointers: A [batch_size, num_tags] matrix of backpointers.
      new_state: A [batch_size, num_tags] matrix of new score values.
    """
    # For simplicity, in shape comments, denote:
    # 'batch_size' by 'B', 'max_seq_len' by 'T' , 'num_tags' by 'O' (output).
    state = array_ops.expand_dims(state, 2)                         # [B, O, 1]

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension.
    # [B, O, 1] + [1, O, O] -> [B, O, O]
    transition_scores = state + self._transition_params             # [B, O, O]
    new_state = inputs + math_ops.reduce_max(transition_scores, [1])  # [B, O]
    backpointers = math_ops.argmax(transition_scores, 1)
    backpointers = math_ops.cast(backpointers, dtype=dtypes.int32)    # [B, O]
    return backpointers, new_state
Developer ID: Jordan1237, Project: tensorflow, Lines of code: 25, Source: crf.py


Example 11: calculate_sequence_by_mask

def calculate_sequence_by_mask(mask, time_major):
  """Calculate the sequence length tensor (1-D) based on the masking tensor.

  The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
  any timestep that should be masked, the corresponding field will be False.
  Consider the following example:
    a = [[True, True, False, False],
         [True, False, True, False]]
  It is a (2, 4) tensor, and the corresponding sequence length should be a
  1-D tensor with value [2, 3]. Note that for the second sequence, we need to
  find the index of the last True value, which is 2, so the sequence length is 3.

  Args:
    mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if
      time_major=True.
    time_major: Boolean, which indicates whether the mask is time major or batch
      major.
  Returns:
    sequence_length: 1D int32 tensor.
  """
  timestep_index = 0 if time_major else 1
  max_seq_length = array_ops.shape(mask)[timestep_index]
  reversed_mask = math_ops.cast(array_ops.reverse(mask, axis=[timestep_index]),
                                dtypes.int32)
  # Use the argmax to find the index of leading 1 in the reversed mask, which is
  # the index of the last True value in the original mask.
  reversed_index = math_ops.argmax(reversed_mask, axis=timestep_index,
                                   output_type=dtypes.int32)
  return max_seq_length - reversed_index
Developer ID: adit-chandra, Project: tensorflow, Lines of code: 29, Source: recurrent_v2.py
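
The reversed-mask trick used above can be reproduced with the public API. The sketch below assumes the batch-major mask from the docstring and is only an illustration, not the library code itself:

import tensorflow as tf

mask = tf.constant([[True, True, False, False],
                    [True, False, True, False]])  # [batch, timestep]

max_seq_length = tf.shape(mask)[1]                # 4
# After reversing along time, the last True becomes the first 1 found by argmax.
reversed_mask = tf.cast(tf.reverse(mask, axis=[1]), tf.int32)
reversed_index = tf.argmax(reversed_mask, axis=1, output_type=tf.int32)  # [2, 1]
sequence_length = max_seq_length - reversed_index  # -> [2, 3]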


Example 12: logits_to_predictions

  def logits_to_predictions(self, logits, proba=False):
    if proba:
      raise ValueError(
          "logits to probabilities is not supported for _BinarySvmTargetColumn")

    logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
    return math_ops.argmax(logits, 1)
Developer ID: Albert-Z-Guo, Project: tensorflow, Lines of code: 7, Source: target_column.py


Example 13: _ModelFn

    def _ModelFn(features, labels, mode):
      if is_training:
        logits_out = self._BuildGraph(features)
      else:
        graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
        logits_out = importer.import_graph_def(
            graph_def,
            input_map={INPUT_NODE_NAME: features},
            return_elements=[OUTPUT_NODE_NAME + ':0'],
            name='')[0]

      loss = losses.sparse_softmax_cross_entropy(
          labels=labels, logits=logits_out)
      summary.scalar('loss', loss)

      classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
      accuracy = metrics.accuracy(
          labels=labels, predictions=classes_out, name='acc_op')
      summary.scalar('accuracy', accuracy[1])

      if mode == ModeKeys.EVAL:
        return EstimatorSpec(
            mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
      elif mode == ModeKeys.TRAIN:
        optimizer = AdamOptimizer(learning_rate=1e-2)
        train_op = optimizer.minimize(loss, global_step=get_global_step())
        return EstimatorSpec(mode, loss=loss, train_op=train_op)
Developer ID: kylin9872, Project: tensorflow, Lines of code: 27, Source: quantization_mnist_test.py


Example 14: extract_argmax_and_embed

 def extract_argmax_and_embed(prev, _):
   """Loop_function that extracts the symbol from prev and embeds it."""
   if output_projection is not None:
     prev = nn_ops.xw_plus_b(
         prev, output_projection[0], output_projection[1])
   prev_symbol = array_ops.stop_gradient(math_ops.argmax(prev, 1))
   return embedding_ops.embedding_lookup(embedding, prev_symbol)
Developer ID: maxkarlovitz, Project: tensorflow, Lines of code: 7, Source: seq2seq.py


Example 15: _one_hot_to_embedding

def _one_hot_to_embedding(one_hot, embedding_size):
  """Get a dense embedding vector from a one-hot encoding."""
  num_tokens = one_hot.shape[1]
  label_id = math_ops.argmax(one_hot, axis=1)
  embedding = variable_scope.get_variable(
      'embedding', [num_tokens, embedding_size])
  return embedding_ops.embedding_lookup(
      embedding, label_id, name='token_to_embedding')
Developer ID: Albert-Z-Guo, Project: tensorflow, Lines of code: 8, Source: conditioning_utils_impl.py
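
Here argmax simply recovers integer label ids from the one-hot encoding before the embedding lookup. A small hedged sketch with made-up values, using the public tf API rather than the internal ops:

import tensorflow as tf

one_hot = tf.constant([[0., 1., 0., 0.],
                       [0., 0., 0., 1.]])   # [batch, num_tokens]
label_id = tf.argmax(one_hot, axis=1)        # -> [1, 3]

# Each id then indexes one row of a [num_tokens, embedding_size] table.
embedding_table = tf.random.normal([4, 8])
embeddings = tf.nn.embedding_lookup(embedding_table, label_id)  # shape [2, 8]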


Example 16: _multiclass_metrics

 def _multiclass_metrics(predictions, labels, weights):
   """Prepares eval metrics for multiclass eval."""
   metrics = dict()
   logits = predictions["scores"]
   classes = math_ops.argmax(logits, 1)
   metrics["accuracy"] = metrics_lib.streaming_accuracy(
       classes, labels, weights)
   return metrics
Developer ID: Crazyonxh, Project: tensorflow, Lines of code: 8, Source: mnist.py


Example 17: _logits_to_predictions

  def _logits_to_predictions(self, logits, proba=False):
    if self._n_classes == 2:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
Developer ID: Ambier, Project: tensorflow, Lines of code: 8, Source: dnn_linear_combined.py


Example 18: _logits_to_predictions

 def _logits_to_predictions(self, logits):
   """See `_MultiClassHead`."""
   with ops.name_scope(None, "predictions", (logits,)):
     return {
         prediction_key.PredictionKey.LOGITS: logits,
         prediction_key.PredictionKey.CLASSES: math_ops.argmax(
             _one_class_to_two_class_logits(logits), 1,
             name=prediction_key.PredictionKey.CLASSES)
     }
Developer ID: Hwhitetooth, Project: tensorflow, Lines of code: 9, Source: head.py


Example 19: _predictions

def _predictions(logits, n_classes):
    """Returns predictions for the given logits and n_classes."""
    predictions = {}
    if n_classes == 2:
        predictions[_LOGISTIC] = math_ops.sigmoid(logits)
        logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[_PROBABILITIES] = nn.softmax(logits)
    predictions[_CLASSES] = array_ops.reshape(math_ops.argmax(logits, 1), shape=(-1, 1))
    return predictions
Developer ID: pronobis, Project: tensorflow, Lines of code: 9, Source: dnn.py


Example 20: body

    def body(i, prev_c, prev_h, actions, log_probs):
      # pylint: disable=g-long-lambda
      signal = control_flow_ops.cond(
          math_ops.equal(i, 0),
          lambda: array_ops.tile(device_go_embedding,
                                 [self.hparams.num_children, 1]),
          lambda: embedding_ops.embedding_lookup(device_embeddings,
                                                 actions.read(i - 1))
      )
      if self.hparams.keep_prob is not None:
        signal = nn_ops.dropout(signal, self.hparams.keep_prob)
      next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
      query = math_ops.matmul(next_h, attn_w_2)
      query = array_ops.reshape(
          query, [self.hparams.num_children, 1, self.hparams.hidden_size])
      query = math_ops.tanh(query + attn_mem)
      query = array_ops.reshape(query, [
          self.hparams.num_children * self.num_groups, self.hparams.hidden_size
      ])
      query = math_ops.matmul(query, attn_v)
      query = array_ops.reshape(query,
                                [self.hparams.num_children, self.num_groups])
      query = nn_ops.softmax(query)
      query = array_ops.reshape(query,
                                [self.hparams.num_children, self.num_groups, 1])
      query = math_ops.reduce_sum(attn_mem * query, axis=1)
      query = array_ops.concat([next_h, query], axis=1)
      logits = math_ops.matmul(query, device_softmax)
      logits /= self.hparams.temperature
      if self.hparams.tanh_constant > 0:
        logits = math_ops.tanh(logits) * self.hparams.tanh_constant
      if self.hparams.logits_std_noise > 0:
        num_in_logits = math_ops.cast(
            array_ops.size(logits), dtype=dtypes.float32)
        avg_norm = math_ops.divide(
            linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
        logits_noise = random_ops.random_normal(
            array_ops.shape(logits),
            stddev=self.hparams.logits_std_noise * avg_norm)
        logits = control_flow_ops.cond(
            self.global_step > self.hparams.stop_noise_step, lambda: logits,
            lambda: logits + logits_noise)

      if mode == "sample":
        next_y = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
      elif mode == "greedy":
        next_y = math_ops.argmax(logits, 1)
      elif mode == "target":
        next_y = array_ops.slice(y, [0, i], [-1, 1])
      else:
        raise NotImplementedError
      next_y = math_ops.to_int32(next_y)
      next_y = array_ops.reshape(next_y, [self.hparams.num_children])
      actions = actions.write(i, next_y)
      log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=next_y)
      return i + 1, next_c, next_h, actions, log_probs
Developer ID: neuroradiology, Project: tensorflow, Lines of code: 57, Source: hierarchical_controller.py



Note: The tensorflow.python.ops.math_ops.argmax examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright in the code remains with the original authors; for distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.

