Python nn_ops.l2_loss Function Code Examples


This article collects typical usage examples of the l2_loss function from tensorflow.python.ops.nn_ops. If you have been wondering exactly what l2_loss does, how to call it, or what real usage looks like, the hand-picked code examples below may help.



Nine code examples of the l2_loss function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
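Before the examples, it helps to keep the definition in mind: for a tensor t, l2_loss(t) returns half of the sum of its squared entries, sum(t ** 2) / 2, reduced to a scalar. The snippet below is a minimal sketch of that behaviour using the public alias tf.nn.l2_loss rather than the internal nn_ops module quoted in the examples; it assumes an ordinary TensorFlow 2.x installation.

import tensorflow as tf

# l2_loss reduces over every element and halves the sum of squares.
x = tf.constant([1.0, 0.0, 3.0, 2.0])
loss = tf.nn.l2_loss(x)
print(loss.numpy())  # 7.0, i.e. (1 + 0 + 9 + 4) / 2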

Example 1: testGradientFloat16

  def testGradientFloat16(self):
    with self.test_session(use_gpu=True) as sess:
      # Randomly construct a 1D shape from [1, 40)
      shape = random_ops.random_uniform(
          [1], minval=1, maxval=40, dtype=dtypes.int32)

      # Construct the fp32 graph and its gradient.
      x = random_ops.random_uniform(shape, minval=-1, maxval=1, name="x")
      y1 = nn_ops.relu(x, name="relu_fp32")
      l1 = nn_ops.l2_loss(y1)
      dx_f32 = gradients_impl.gradients(l1, x)

      # Construct the fp16 graph and its gradient.
      # It starts with the same x, in fp32. But before it reaches Relu, it is
      # cast into fp16. So during backprop, the gradient computation is in fp16.
      x2 = math_ops.cast(x, dtype=dtypes.float16, name="cast")
      y2 = nn_ops.relu(x2, name="relu_fp16")
      l2 = nn_ops.l2_loss(y2)
      dx_f16 = gradients_impl.gradients(l2, x)

      # Repeat the experiment for 100 times. All tensor shapes and its tensor
      # values are randomly generated for each run.
      for _ in xrange(100):
        dx_f32_v, dx_f16_v = sess.run([dx_f32, dx_f16])
        self.assertAllClose(dx_f32_v, dx_f16_v, atol=3e-4)
Developer: HughKu, Project: tensorflow, Lines of code: 25, Source: relu_op_test.py
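For readers on TensorFlow 2.x, the idea of this test can be sketched in eager mode with the public API. This is only an illustrative rewrite, not part of the original test; the fixed shape of 32 elements and the 3e-4 tolerance are carried over as assumptions.

import tensorflow as tf

x = tf.random.uniform([32], minval=-1.0, maxval=1.0)

# fp32 path: d/dx of l2_loss(relu(x)).
with tf.GradientTape() as tape:
  tape.watch(x)
  loss_f32 = tf.nn.l2_loss(tf.nn.relu(x))
dx_f32 = tape.gradient(loss_f32, x)

# fp16 path: cast before relu so the backward pass runs in fp16.
with tf.GradientTape() as tape:
  tape.watch(x)
  loss_f16 = tf.nn.l2_loss(tf.nn.relu(tf.cast(x, tf.float16)))
dx_f16 = tape.gradient(loss_f16, x)

# Both gradients come back as float32 tensors; they should agree to within fp16 precision.
tf.debugging.assert_near(dx_f32, dx_f16, atol=3e-4)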


Example 2: _testGradient

  def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      if data_format == "NCHW":
        np_input = self._NHWCToNCHW(np_input)
      input_tensor = constant_op.constant(
          np_input, shape=np_input.shape, dtype=dtype)
      bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
      output_tensor = nn_ops.bias_add(
          input_tensor, bias_tensor, data_format=data_format)
      tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
          input_tensor, np_input.shape, output_tensor, np_input.shape)
      bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
          bias_tensor, bias.shape, output_tensor, np_input.shape)

      # Test gradient of BiasAddGrad
      bias_add_grad = gradients_impl.gradients(
          nn_ops.l2_loss(output_tensor), bias_tensor)[0]
      grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
          output_tensor, np_input.shape, bias_add_grad, bias.shape)

      if dtype == np.float16:
        # Compare fp16 theoretical gradients to fp32 numerical gradients,
        # since fp16 numerical gradients are too imprecise unless great
        # care is taken with choosing the inputs and the delta. This is
        # a weaker check (in particular, it does not test the op itself,
        # only its gradient), but it's much better than nothing.
        input_tensor = constant_op.constant(
            np_input, shape=np_input.shape, dtype=np.float32)
        bias_tensor = constant_op.constant(
            bias, shape=bias.shape, dtype=np.float32)
        output_tensor = nn_ops.bias_add(
            input_tensor, bias_tensor, data_format=data_format)
        _, tensor_jacob_n = gradient_checker.compute_gradient(input_tensor,
                                                              np_input.shape,
                                                              output_tensor,
                                                              np_input.shape)
        _, bias_jacob_n = gradient_checker.compute_gradient(bias_tensor,
                                                            bias.shape,
                                                            output_tensor,
                                                            np_input.shape)

        bias_add_grad = gradients_impl.gradients(
            nn_ops.l2_loss(output_tensor), bias_tensor)[0]
        _, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
                                                            np_input.shape,
                                                            bias_add_grad,
                                                            bias.shape)

      threshold = 2e-3
      if dtype == dtypes.float64:
        threshold = 1e-10
      self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
      # TODO(annarev): Re-add assertion for float16, float32 dtypes and NCHW
      # once we figure out why this check started failing with cuda mavx.
      if dtype == dtypes.float64 or data_format != "NCHW":
        self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
        self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)
Developer: xylary, Project: tensorflow, Lines of code: 57, Source: bias_op_test.py


Example 3: testL2Loss

  def testL2Loss(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      x = constant_op.constant(
          [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
      l2loss = nn_ops.l2_loss(x)
      value = self.evaluate(l2loss)
      self.assertAllClose(7.0, value)
Developer: AnddyWang, Project: tensorflow, Lines of code: 7, Source: nn_test.py
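The expected value of 7.0 follows directly from the definition: for x = [1, 0, 3, 2], l2_loss(x) = (1**2 + 0**2 + 3**2 + 2**2) / 2 = 14 / 2 = 7.0. The reshape to [2, 2] does not change the result, because the reduction runs over all elements.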


Example 4: testL2Loss

  def testL2Loss(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.test_session():
        x = constant_op.constant(
            [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
        l2loss = nn_ops.l2_loss(x)
        value = l2loss.eval()
      self.assertAllClose(7.0, value)
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 8, Source: nn_test.py


Example 5: testGradient

  def testGradient(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    with self.test_session():
      x = constant_op.constant(x_val, name="x")
      output = nn_ops.l2_loss(x)
      err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])
    print("L2Loss gradient err = %g " % err)
    err_tolerance = 1e-11
    self.assertLess(err, err_tolerance)
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 11, Source: nn_test.py
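Since l2_loss(x) = sum(x ** 2) / 2, its gradient with respect to x is x itself, so with float64 inputs the numerically estimated gradient should agree with the theoretical one to well within the 1e-11 tolerance used here.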


Example 6: BuildFullModel

def BuildFullModel():
  """Build the full model with conv,rnn,opt."""
  seq = []
  for i in range(4):
    with variable_scope.variable_scope('inp_%d' % i):
      seq.append(array_ops.reshape(BuildSmallModel(), [2, 1, -1]))

  cell = rnn_cell.BasicRNNCell(16)
  out = rnn.dynamic_rnn(
      cell, array_ops.concat(seq, axis=1), dtype=dtypes.float32)[0]

  target = array_ops.ones_like(out)
  loss = nn_ops.l2_loss(math_ops.reduce_mean(target - out))
  sgd_op = gradient_descent.GradientDescentOptimizer(1e-2)
  return sgd_op.minimize(loss)
Developer: 1000sprites, Project: tensorflow, Lines of code: 15, Source: model_analyzer_testlib.py


Example 7: global_norm

def global_norm(t_list, name=None):
  """Computes the global norm of multiple tensors.

  Given a tuple or list of tensors `t_list`, this operation returns the
  global norm of the elements in all tensors in `t_list`. The global norm is
  computed as:

  `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`

  Any entries in `t_list` that are of type None are ignored.

  Args:
    t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
    name: A name for the operation (optional).

  Returns:
    A 0-D (scalar) `Tensor` of type `float`.

  Raises:
    TypeError: If `t_list` is not a sequence.
  """
  if (not isinstance(t_list, collections.Sequence)
      or isinstance(t_list, six.string_types)):
    raise TypeError("t_list should be a sequence")
  t_list = list(t_list)
  with ops.name_scope(name, "global_norm", t_list) as name:
    values = [
        ops.convert_to_tensor(
            t.values if isinstance(t, ops.IndexedSlices) else t,
            name="t_%d" % i)
        if t is not None else t
        for i, t in enumerate(t_list)]
    half_squared_norms = []
    for v in values:
      if v is not None:
        with ops.colocate_with(v):
          half_squared_norms.append(nn_ops.l2_loss(v))

    half_squared_norm = math_ops.reduce_sum(array_ops.pack(half_squared_norms))

    norm = math_ops.sqrt(
        half_squared_norm *
        constant_op.constant(2.0, dtype=half_squared_norm.dtype),
        name="global_norm")

  return norm
Developer: 821760408-sp, Project: tensorflow, Lines of code: 46, Source: clip_ops.py
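For reference, the same computation is exposed publicly as tf.linalg.global_norm in TensorFlow 2.x (tf.global_norm in 1.x). A minimal usage sketch with illustrative values, assuming a standard TensorFlow 2.x installation:

import tensorflow as tf

t1 = tf.constant([3.0, 4.0])      # L2 norm 5
t2 = tf.constant([[5.0, 12.0]])   # L2 norm 13
gn = tf.linalg.global_norm([t1, t2])
print(gn.numpy())  # sqrt(5**2 + 13**2) = sqrt(194), roughly 13.93

Note how the implementation above multiplies the summed half-squared norms by 2.0 before taking the square root; that compensates for the factor of 1/2 built into l2_loss.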


Example 8: grad

  def grad(x):
    with backprop.GradientTape() as tape:
      tape.watch(x)
      y = nn_ops.l2_loss(nn_ops.relu(x))
    return tape.gradient(y, x)
Developer: Wajih-O, Project: tensorflow, Lines of code: 5, Source: relu_op_test.py
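For an input with positive and negative entries, grad(x) returns relu(x) element-wise: with y = sum(relu(x) ** 2) / 2, the derivative with respect to each entry is x where x > 0 and 0 otherwise (TensorFlow also uses 0 for the gradient at exactly x = 0).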


Example 9: l2norm_squared

def l2norm_squared(v):
  return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
Developer: Ajaycs99, Project: tensorflow, Lines of code: 2, Source: util.py
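Because l2_loss returns half of the squared L2 norm, multiplying by 2 recovers the full squared norm sum(v ** 2); casting the constant 2 to v.dtype.base_dtype keeps the multiplication type-consistent for float32 and float64 inputs alike.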



Note: The tensorflow.python.ops.nn_ops.l2_loss examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from contributors' open-source projects, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not repost without permission.



