Python nn_ops.sparse_softmax_cross_entropy_with_logits Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits. If you are wondering exactly what sparse_softmax_cross_entropy_with_logits does and how to use it, the curated code examples below should help.



Twenty code examples of the sparse_softmax_cross_entropy_with_logits function are presented below, ordered by popularity.
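Before diving into the collected examples, here is a minimal, self-contained sketch of the function's basic usage. This sketch is not from the article; it assumes TensorFlow 2.x with eager execution, and it imports the internal nn_ops module only to mirror the examples below (the public alias is tf.nn.sparse_softmax_cross_entropy_with_logits):

import numpy as np
from tensorflow.python.ops import nn_ops

# Two examples, three classes. Labels are class indices, not one-hot vectors.
logits = np.array([[2.0, 0.5, -1.0],
                   [0.1, 0.2, 3.0]], dtype=np.float32)
labels = np.array([0, 2], dtype=np.int32)

# Returns one cross-entropy loss value per example (shape [2]).
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(loss)

Both rows have their largest logit at the labeled class, so both losses are small; reducing them with a sum or mean is left to the caller, as several examples below illustrate.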

Example 1: testLabelsPlaceholderScalar

 def testLabelsPlaceholderScalar(self):
   with self.test_session(use_gpu=True):
     labels = array_ops.placeholder(np.int32)
     y = nn_ops.sparse_softmax_cross_entropy_with_logits(
         labels=labels, logits=[[7.]])
     with self.assertRaisesOpError("labels must be 1-D"):
       y.eval(feed_dict={labels: 0})
Author: kadeng | Project: tensorflow | Lines: 7 | Source: sparse_xent_op_test.py


Example 2: MMIloss

def MMIloss(logits, targets, weights, lam, gam, lm_perps,
            average_across_timesteps=True,
            softmax_loss_function=None, name=None):
  """MMI-style sequence loss.

  lam is the lambda value (diversity penalty) of the objective and gam is the
  gamma value (length penalty); see section 4.5.1 of Li et al. lm_perps, the
  language-model log-perplexity term that the objective subtracts, was
  referenced but never defined in the original snippet, so it is taken here
  as an explicit argument.
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))

  with ops.op_scope(logits + targets + weights, name,
                    "sequence_loss_by_example"):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
    # MMI objective: cross-entropy minus diversity penalty plus length reward.
    final_perps = log_perps - lam * lm_perps + gam * len(targets)
    return final_perps
Author: myungin | Project: NLP_baseline | Lines: 33 | Source: MMIloss.py


Example 3: sequence_loss_tensor

def sequence_loss_tensor(
    logits, targets, weights, num_classes, average_across_timesteps=True, softmax_loss_function=None, name=None
):
    """Weighted cross-entropy loss for a sequence of logits (per example).

    """
    #    if (logits.get_shape()[0:2]) != targets.get_shape() \
    #        or (logits.get_shape()[0:2]) != weights.get_shape():
    #        print(logits.get_shape()[0:2])
    #        print(targets.get_shape())
    #        print(weights.get_shape())
    #        raise ValueError("Shapes of logits, weights, and targets must be the "
    #            "same")
    with ops.op_scope([logits, targets, weights], name, "sequence_loss_by_example"):
        probs_flat = tf.reshape(logits, [-1, num_classes])
        targets = tf.reshape(targets, [-1])
        if softmax_loss_function is None:
            crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(probs_flat, targets)
        else:
            crossent = softmax_loss_function(probs_flat, targets)
        crossent = crossent * tf.reshape(weights, [-1])
        crossent = tf.reduce_sum(crossent)
        total_size = math_ops.reduce_sum(weights)
        total_size += 1e-12  # to avoid division by zero
        crossent /= total_size
        return crossent
Author: Styrke | Project: master-code | Lines: 26 | Source: tfextensions.py
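Example 3 flattens [batch, time, num_classes] logits to [batch*time, num_classes] and targets to [batch*time] before applying the op, then masks with weights and normalizes by the total weight. The pattern is easier to follow with concrete shapes; this hedged sketch uses the public TF 2.x API, and the shapes and values are illustrative rather than taken from the original project:

import tensorflow as tf

batch, time, num_classes = 2, 3, 5
logits = tf.random.normal([batch, time, num_classes])
targets = tf.random.uniform([batch, time], maxval=num_classes, dtype=tf.int32)
weights = tf.ones([batch, time])  # set entries to 0 to mask padding steps

logits_flat = tf.reshape(logits, [-1, num_classes])  # [batch*time, num_classes]
targets_flat = tf.reshape(targets, [-1])             # [batch*time]
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=targets_flat, logits=logits_flat)
crossent = crossent * tf.reshape(weights, [-1])      # zero out masked steps
loss = tf.reduce_sum(crossent) / (tf.reduce_sum(weights) + 1e-12)
print(loss.numpy())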


Example 4: testInt32GPU

 def testInt32GPU(self):
   if not context.context().num_gpus():
     self.skipTest('No GPUs found')
   with ops.device('gpu:0'):
     xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
         logits=[[0.0, 0.0]], labels=[0])
   self.assertAllClose(xent, [0.69314718])
Author: StephenOman | Project: tensorflow | Lines: 7 | Source: core_test.py


Example 5: _log_prob

 def _log_prob(self, k):
     k = ops.convert_to_tensor(k, name="k")
     logits = self.logits * array_ops.ones_like(array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
     shape = array_ops.slice(array_ops.shape(logits), [0], [array_ops.rank(logits) - 1])
     k *= array_ops.ones(shape, dtype=k.dtype)
     k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
     return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k)
Author: zhang8128 | Project: tensorflow | Lines: 7 | Source: categorical.py


Example 6: log_prob

  def log_prob(self, k, name="log_prob"):
    """Log-probability of class `k`.

    Args:
      k: `int32` or `int64` Tensor. Must be broadcastable with a `batch_shape`
        `Tensor`.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the classes indexed by `k`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([k, self.logits], name):
        k = ops.convert_to_tensor(k, name="k")

        logits = self.logits * array_ops.ones_like(
            array_ops.expand_dims(k, -1),
            dtype=self.logits.dtype)
        k *= array_ops.ones(
            array_ops.slice(
                array_ops.shape(logits), [0], [array_ops.rank(logits) - 1]),
            dtype=k.dtype)
        k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))

        return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k)
Author: 2020zyc | Project: tensorflow | Lines: 25 | Source: categorical.py
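Examples 5 and 6 work because, for a class index k, sparse_softmax_cross_entropy_with_logits returns exactly -log softmax(logits)[k], so its negation is the categorical log-probability. A quick numerical check of that identity (illustrative values, assumes TF 2.x eager and the public tf.nn API):

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
k = tf.constant([2])

xent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=k, logits=logits)
log_probs = tf.nn.log_softmax(logits)
# Both print roughly -0.4076: log p(k=2) equals minus the cross-entropy.
print(-xent.numpy()[0], log_probs.numpy()[0, 2])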


Example 7: MYsequence_loss_by_example

 def MYsequence_loss_by_example(logits, targets, weights,
                              average_across_timesteps=True,
                              softmax_loss_function=None, name=None):
   if len(targets) != len(logits) or len(weights) != len(logits):
     raise ValueError("Lengths of logits, weights, and targets must be the same "
                      "%d, %d, %d." % (len(logits), len(weights), len(targets)))
   with ops.op_scope(logits + targets + weights, name,
                     "sequence_loss_by_example"):
     log_perp_list = []
     for logit, target, weight in zip(logits, targets, weights):
       if softmax_loss_function is None:
         # TODO(irving,ebrevdo): This reshape is needed because
         # sequence_loss_by_example is called with scalars sometimes, which
         # violates our general scalar strictness policy.
         target = array_ops.reshape(target, [-1])
         crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
             logit, target)
       else:
         crossent = softmax_loss_function(logit, target)
       print(crossent, weight)  # debug: per-step loss and weight
       log_perp_list.append(crossent * weight)
       print(log_perp_list)  # debug: accumulated per-step weighted losses
     log_perps = math_ops.add_n(log_perp_list)
     if average_across_timesteps:
       total_size = math_ops.add_n(weights)
       total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
       log_perps /= total_size
   return log_perps
Author: awjuliani | Project: NeuralDreamVideos | Lines: 28 | Source: model_rnn.py


Example 8: generate_single_output

def generate_single_output(encoder_state, attention_states, sequence_length, targets, num_classes, buckets,
                       use_mean_attention=False,
                       softmax_loss_function=None, per_example_loss=False, name=None, use_attention=False):
  all_inputs = targets
  with ops.op_scope(all_inputs, name, "model_with_buckets"):
    with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                       reuse=None):
      bucket_attention_states, bucket_attn_weights, bucket_attns, bucket_outputs = attention_single_output_decoder(
                                                                                        encoder_state, attention_states, output_size=num_classes,
                                                                                        num_heads=1,
                                                                                        sequence_length=sequence_length,
                                                                                        initial_state_attention=True,
                                                                                        use_attention=use_attention)

      if softmax_loss_function is None:
        assert len(bucket_outputs) == len(targets) == 1
        # We need to make the target an int64 tensor and set its shape.
        bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logits=bucket_outputs[0], labels=bucket_target)
      else:
        assert len(bucket_outputs) == len(targets) == 1
        crossent = softmax_loss_function(bucket_outputs[0], targets[0])

      batch_size = array_ops.shape(targets[0])[0]
      loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size, dtypes.float32)

  return bucket_outputs, loss
Author: bwang514 | Project: rnn-nlu | Lines: 28 | Source: seq_classification.py


Example 9: _log_prob

  def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    if self.validate_args:
      k = distribution_util.embed_check_integer_casting_closed(
          k, target_dtype=dtypes.int32)

    if self.logits.get_shape()[:-1] == k.get_shape():
      logits = self.logits
    else:
      logits = self.logits * array_ops.ones_like(
          array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
      logits_shape = array_ops.shape(logits)[:-1]
      k *= array_ops.ones(logits_shape, dtype=k.dtype)
      k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
    if k.dtype.is_integer:
      pass
    elif k.dtype.is_floating:
      # When `validate_args=True` we've already ensured int/float casting
      # is closed, so this cast is safe.
      k = math_ops.cast(k, dtype=dtypes.int32)
    else:
      raise TypeError("`value` should have integer `dtype` or "
                      "`self.dtype` ({})".format(self.dtype.base_dtype))
    return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
                                                            logits=logits)
Author: AutumnQYN | Project: tensorflow | Lines: 25 | Source: categorical.py


Example 10: body

    def body(i, prev_c, prev_h, actions, log_probs):
      # pylint: disable=g-long-lambda
      signal = control_flow_ops.cond(
          math_ops.equal(i, 0),
          lambda: array_ops.tile(device_go_embedding,
                                 [self.hparams.num_children, 1]),
          lambda: embedding_ops.embedding_lookup(device_embeddings,
                                                 actions.read(i - 1))
      )
      if self.hparams.keep_prob is not None:
        signal = nn_ops.dropout(signal, self.hparams.keep_prob)
      next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
      query = math_ops.matmul(next_h, attn_w_2)
      query = array_ops.reshape(
          query, [self.hparams.num_children, 1, self.hparams.hidden_size])
      query = math_ops.tanh(query + attn_mem)
      query = array_ops.reshape(query, [
          self.hparams.num_children * self.num_groups, self.hparams.hidden_size
      ])
      query = math_ops.matmul(query, attn_v)
      query = array_ops.reshape(query,
                                [self.hparams.num_children, self.num_groups])
      query = nn_ops.softmax(query)
      query = array_ops.reshape(query,
                                [self.hparams.num_children, self.num_groups, 1])
      query = math_ops.reduce_sum(attn_mem * query, axis=1)
      query = array_ops.concat([next_h, query], axis=1)
      logits = math_ops.matmul(query, device_softmax)
      logits /= self.hparams.temperature
      if self.hparams.tanh_constant > 0:
        logits = math_ops.tanh(logits) * self.hparams.tanh_constant
      if self.hparams.logits_std_noise > 0:
        num_in_logits = math_ops.cast(
            array_ops.size(logits), dtype=dtypes.float32)
        avg_norm = math_ops.divide(
            linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
        logits_noise = random_ops.random_normal(
            array_ops.shape(logits),
            stddev=self.hparams.logits_std_noise * avg_norm)
        logits = control_flow_ops.cond(
            self.global_step > self.hparams.stop_noise_step, lambda: logits,
            lambda: logits + logits_noise)

      if mode == "sample":
        next_y = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
      elif mode == "greedy":
        next_y = math_ops.argmax(logits, 1)
      elif mode == "target":
        next_y = array_ops.slice(y, [0, i], [-1, 1])
      else:
        raise NotImplementedError
      next_y = math_ops.to_int32(next_y)
      next_y = array_ops.reshape(next_y, [self.hparams.num_children])
      actions = actions.write(i, next_y)
      log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=next_y)
      return i + 1, next_c, next_h, actions, log_probs
Author: neuroradiology | Project: tensorflow | Lines: 57 | Source: hierarchical_controller.py


Example 11: _testHighDim

 def _testHighDim(self, features, labels):
   np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
   # manually reshape loss
   np_loss = np.reshape(np_loss, np.array(labels).shape)
   with self.test_session(use_gpu=True) as sess:
     loss = nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels)
     backprop = loss.op.inputs[0].op.outputs[1]
     tf_loss, tf_backprop = sess.run([loss, backprop])
   self.assertAllCloseAccordingToType(np_loss, tf_loss)
   self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
Author: kdavis-mozilla | Project: tensorflow | Lines: 10 | Source: sparse_xent_op_test.py


Example 12: _log_prob

  def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    if self.validate_args:
      k = distribution_util.embed_check_integer_casting_closed(
          k, target_dtype=dtypes.int32)
    k, logits = _broadcast_cat_event_and_params(
        k, self.logits, base_dtype=self.dtype.base_dtype)

    return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
                                                            logits=logits)
Author: didukhle | Project: tensorflow | Lines: 10 | Source: categorical.py


Example 13: _sparse_vs_dense_xent_benchmark_sparse

def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
  # Using sparse_softmax_cross_entropy_with_logits
  labels = labels.astype(np.int64)
  labels = array_ops.identity(labels)
  logits = array_ops.identity(logits)
  crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
      logits, labels, name="SequenceLoss/CrossEntropy")
  crossent_sum = math_ops.reduce_sum(crossent)
  grads = gradients_impl.gradients([crossent_sum], [logits])[0]

  return (crossent_sum, grads)
Author: kdavis-mozilla | Project: tensorflow | Lines: 11 | Source: sparse_xent_op_test.py


Example 14: testScalarHandling

 def testScalarHandling(self):
   with self.test_session(use_gpu=False) as sess:
     with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                  ".*labels must be 1-D.*"):
       labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
       logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
       ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
           logits, array_ops.squeeze(labels))
       labels_v2 = np.zeros((1, 1), dtype=np.int32)
       logits_v2 = np.random.randn(1, 3)
       sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
Author: kdavis-mozilla | Project: tensorflow | Lines: 11 | Source: sparse_xent_op_test.py
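Examples 1 and 14 both probe the op's shape checking: for [batch, num_classes] logits, labels must be a rank-1 tensor of length batch. A small sketch of the failure mode (illustrative; depending on whether shapes are known statically, TF raises ValueError at trace time or InvalidArgumentError at run time, so both are caught here):

import tensorflow as tf

logits = tf.zeros([2, 3])
bad_labels = tf.zeros([2, 1], dtype=tf.int32)  # should be shape [2], rank 1

try:
  tf.nn.sparse_softmax_cross_entropy_with_logits(labels=bad_labels, logits=logits)
except (ValueError, tf.errors.InvalidArgumentError) as e:
  print("rejected as expected:", e)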


Example 15: testGradient

 def testGradient(self):
   with self.test_session(use_gpu=True):
     l = constant_op.constant([3, 0, 1], name="l")
     f = constant_op.constant(
         [0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
         shape=[3, 4],
         dtype=dtypes.float64,
         name="f")
     x = nn_ops.sparse_softmax_cross_entropy_with_logits(f, l, name="xent")
     err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
   print("cross entropy gradient err = ", err)
   self.assertLess(err, 5e-8)
Author: kdavis-mozilla | Project: tensorflow | Lines: 12 | Source: sparse_xent_op_test.py
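Example 15 uses the old graph-mode gradient_checker; in TF 2.x the analogous check compares analytic and numeric Jacobians with tf.test.compute_gradient. A hedged sketch reusing the same values as the example above:

import numpy as np
import tensorflow as tf

labels = tf.constant([3, 0, 1])
logits = tf.constant(
    [[0.1, 0.2, 0.3, 0.4], [0.1, 0.4, 0.9, 1.6], [0.1, 0.8, 2.7, 6.4]],
    dtype=tf.float64)

def loss_fn(f):
  return tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=f)

# Returns (theoretical, numerical) Jacobians, one per argument of loss_fn.
theoretical, numerical = tf.test.compute_gradient(loss_fn, [logits])
err = np.max(np.abs(theoretical[0] - numerical[0]))
print("cross entropy gradient err =", err)  # should be tiny, well below 5e-8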


Example 16: _log_prob

 def _log_prob(self, k):
   k = ops.convert_to_tensor(k, name="k")
   if self.logits.get_shape()[:-1] == k.get_shape():
     logits = self.logits
   else:
     logits = self.logits * array_ops.ones_like(
         array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
     logits_shape = array_ops.shape(logits)[:-1]
     k *= array_ops.ones(logits_shape, dtype=k.dtype)
     k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
   return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
                                                           logits=logits)
Author: AlbertXiebnu | Project: tensorflow | Lines: 12 | Source: categorical.py


Example 17: log_pmf

  def log_pmf(self, k, name="log_pmf"):
    """Log-probability of class `k`.

    Args:
      k: `int32` or `int64` Tensor with shape = `self.batch_shape()`.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the classes indexed by `k`
    """
    with ops.name_scope(self.name):
      k = ops.convert_to_tensor(k, name="k")
      k.set_shape(self.get_batch_shape())
      return -nn_ops.sparse_softmax_cross_entropy_with_logits(
          self.logits, k, name=name)
Author: 363158858 | Project: tensorflow | Lines: 15 | Source: categorical.py


Example 18: testSecondGradient

  def testSecondGradient(self):
    images_placeholder = array_ops.placeholder(dtypes.float32, shape=(3, 2))
    labels_placeholder = array_ops.placeholder(dtypes.int32, shape=(3))
    weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
    weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
                                         axis=1)
    logits = math_ops.matmul(images_placeholder, weights_with_zeros)
    cross_entropy = nn_ops.sparse_softmax_cross_entropy_with_logits(
        labels=labels_placeholder, logits=logits)
    loss = math_ops.reduce_mean(cross_entropy)

    # Taking the second gradient should fail, since it is not
    # yet supported.
    with self.assertRaisesRegexp(LookupError,
                                 "explicitly disabled"):
      _ = gradients_impl.hessians(loss, [weights])
Author: JonathanRaiman | Project: tensorflow | Lines: 16 | Source: sparse_xent_op_test.py


Example 19: make_grouping_predictions

  def make_grouping_predictions(self, input_layer, reuse=None):
    """Model that predicts grouping (grouping_actions).

    Args:
      input_layer: group_input_layer, a tensor of size [1, num_ops, hidden_size]
      reuse: whether to reuse the variable scope

    Returns:
       grouping_actions: sampled grouping actions
       grouping_log_probs: log probabilities corresponding to the actions
    """
    with variable_scope.variable_scope(self.hparams.name, reuse=True):
      # input_layer: tensor of size [1, num_ops, hidden_size]
      w_grouping_ff = variable_scope.get_variable("w_grouping_ff")
      w_grouping_softmax = variable_scope.get_variable("w_grouping_softmax")

    batch_size = array_ops.shape(input_layer)[0]
    embedding_dim = array_ops.shape(input_layer)[2]

    reshaped = array_ops.reshape(input_layer,
                                 [batch_size * self.num_ops, embedding_dim])
    ff_output = math_ops.matmul(reshaped, w_grouping_ff)
    logits = math_ops.matmul(ff_output, w_grouping_softmax)
    if self.hparams.logits_std_noise > 0:
      num_in_logits = math_ops.cast(
          array_ops.size(logits), dtype=dtypes.float32)
      avg_norm = math_ops.divide(
          linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
      logits_noise = random_ops.random_normal(
          array_ops.shape(logits),
          stddev=self.hparams.logits_std_noise * avg_norm)
      logits = control_flow_ops.cond(
          self.global_step > self.hparams.stop_noise_step, lambda: logits,
          lambda: logits + logits_noise)
    logits = array_ops.reshape(logits,
                               [batch_size * self.num_ops, self.num_groups])
    actions = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
    actions = math_ops.to_int32(actions)
    actions = array_ops.reshape(actions, [batch_size, self.num_ops])
    action_label = array_ops.reshape(actions, [-1])
    log_probs = nn_ops.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=action_label)
    log_probs = array_ops.reshape(log_probs, [batch_size, -1])
    log_probs = math_ops.reduce_sum(log_probs, 1)
    grouping_actions = actions
    grouping_log_probs = log_probs
    return grouping_actions, grouping_log_probs
Author: neuroradiology | Project: tensorflow | Lines: 47 | Source: hierarchical_controller.py
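Examples 10 and 19 rely on the same identity as the Categorical distribution examples: for a sampled action a, sparse_softmax_cross_entropy_with_logits(labels=a, logits=logits) is exactly -log p(a), which is the quantity a REINFORCE-style controller accumulates. A condensed sketch of the sample-then-score step (illustrative shapes, public TF 2.x API, not the controller's actual code):

import tensorflow as tf

logits = tf.random.normal([4, 10])  # 4 rows in a batch, 10 possible actions
actions = tf.random.categorical(logits, 1, seed=1)  # sample one action per row
actions = tf.cast(tf.reshape(actions, [4]), tf.int32)

# Negative log-probability of each sampled action under the policy.
neg_log_p = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=actions, logits=logits)
print(neg_log_p.numpy())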


Example 20: loss

  def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss
Author: 821760408-sp | Project: tensorflow | Lines: 18 | Source: hybrid_model.py



Note: The tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors; before redistributing or using the code, consult the corresponding project's license. Do not reproduce this compilation without permission.

