
Python gradient_checker_v2.compute_gradient Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.gradient_checker_v2.compute_gradient. If you have been wondering exactly what compute_gradient does, how to call it, or what it looks like in real code, the curated examples below should help.



The sections below present 20 code examples of the compute_gradient function, sorted by popularity by default.
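Before the examples, here is a minimal sketch of the basic call pattern (the function f and the input x are illustrative placeholders, not taken from the examples below): compute_gradient(f, [x]) returns a pair of Jacobian tuples, one per input tensor, computed analytically from the registered gradient and numerically by central finite differences; max_error reports the largest elementwise discrepancy between the two.

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops


def f(x):
  # Any differentiable function of the inputs can be checked this way.
  return math_ops.square(x)


x = constant_op.constant(np.array([1.0, 2.0, 3.0]), dtype=dtypes.float32)
# theoretical and numerical are tuples holding one Jacobian per input tensor.
theoretical, numerical = gradient_checker_v2.compute_gradient(f, [x])
err = gradient_checker_v2.max_error(theoretical, numerical)
print(err)  # close to zero when the registered gradient of square is correct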

Example 1: Test

  def Test(self):
    if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
      self.skipTest("Skipping infeasible gradient test.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)

    epsilon = np.finfo(a_np_.dtype).eps
    delta = epsilon**(1.0 / 3.0)
    tol = 20 * delta
    with self.session(), test_util.use_gpu():
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
          [effective_a_np],
          delta=delta)
      self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)

      theoretical, numerical = gradient_checker_v2.compute_gradient(
          lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
          [effective_b_np],
          delta=delta)
      self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
Developer: aeverall, Project: tensorflow, Lines: 26, Source: matmul_op_test.py
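A note on the step size in this example (standard central-difference analysis, not TensorFlow-specific): the truncation error of a central difference grows like \(\delta^2\) while floating-point roundoff contributes on the order of \(\varepsilon/\delta\), so the total error

\[
E(\delta) \approx C_1\,\delta^2 + C_2\,\frac{\varepsilon}{\delta}
\]

is minimized at \(\delta^* = O(\varepsilon^{1/3})\), which is exactly the delta = epsilon**(1.0 / 3.0) used above; tol = 20 * delta then leaves generous headroom over the expected discrepancy.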


Example 2: testEmptySucceeds

 def testEmptySucceeds(self):
   def f(x):
     return array_ops.identity(x)
   x = constant_op.constant(np.random.random_sample((0, 3)),
                            dtype=dtypes.float32)
   for grad in gradient_checker.compute_gradient(f, [x]):
     self.assertEqual(grad[0].shape, (0, 0))
   error = gradient_checker.max_error(*gradient_checker.compute_gradient(
       f, [x]))
   self.assertEqual(error, 0)
Developer: Wajih-O, Project: tensorflow, Lines: 10, Source: gradient_checker_v2_test.py


Example 3: testComplexConj

 def testComplexConj(self):
   def f(x):
     return math_ops.conj(x)
   x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
   analytical, numerical = gradient_checker.compute_gradient(
       f, [x], delta=0.1)
   correct = np.array([[1, 0], [0, -1]])
   self.assertAllEqual(correct, analytical[0])
   self.assertAllClose(correct, numerical[0], rtol=2e-5)
   self.assertLess(
       gradient_checker.max_error(*gradient_checker.compute_gradient(
           f, [x], delta=0.1)), 2e-5)
Developer: aeverall, Project: tensorflow, Lines: 12, Source: gradient_checker_v2_test.py
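A note on the expected matrix [[1, 0], [0, -1]] (the layout described here is inferred from the examples on this page rather than from documentation): the checker represents a complex scalar x = a + bi as the real pair (a, b), and judging from this example and Example 5, rows of the Jacobian index input components while columns index output components. For f(x) = conj(x), the output is u + vi with u = a and v = -b, so

\[
J = \begin{pmatrix} \partial u/\partial a & \partial v/\partial a \\ \partial u/\partial b & \partial v/\partial b \end{pmatrix}
  = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix},
\]

and the same convention reproduces the expected [[5, 7], [-7, 5]] for multiplication by c = 5 + 7i in Example 5.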


Example 4: testComplexConj

 def testComplexConj(self):
   def f(x):
     return math_ops.conj(x)
   x_shape = ()
   x_dtype = dtypes.complex64
   x = constant_op.constant(_random_complex(x_shape, x_dtype))
   analytical, numerical = gradient_checker.compute_gradient(
       f, [x])
   correct = np.array([[1, 0], [0, -1]])
   self.assertAllEqual(correct, analytical[0])
   self.assertAllClose(correct, numerical[0], rtol=2e-5)
   x = constant_op.constant(_random_complex(x_shape, x_dtype))
   self.assertLess(
       gradient_checker.max_error(*gradient_checker.compute_gradient(
           f, [x])), 2e-5)
Developer: Wajih-O, Project: tensorflow, Lines: 15, Source: gradient_checker_v2_test.py


Example 5: testComplexMul

 def testComplexMul(self):
   if not context.executing_eagerly():
     return
   c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
   def f(x):
     return c * x
   x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
   analytical, numerical = gradient_checker.compute_gradient(
       f, [x], delta=0.1)
   correct = np.array([[5, 7], [-7, 5]])
   self.assertAllEqual(correct, analytical[0])
   self.assertAllClose(correct, numerical[0], rtol=1e-4)
   self.assertLess(
       gradient_checker.max_error(*gradient_checker.compute_gradient(
           f, [x], delta=0.1)), 2e-4)
Developer: aeverall, Project: tensorflow, Lines: 15, Source: gradient_checker_v2_test.py


Example 6: testEmptyFails

 def testEmptyFails(self):
   @custom_gradient.custom_gradient
   def id_bad_grad(x):
     y = array_ops.identity(x)
     def grad_fn(dy):
       # dx = constant_op.constant(np.zeros((1, 4)), dtype=dtypes.float32)
       dx = array_ops.transpose(dy)
       return dx
     return y, grad_fn
   def f(x):
     return id_bad_grad(x)
   x = constant_op.constant(np.random.random_sample((0, 3)),
                            dtype=dtypes.float32)
   bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
   with self.assertRaisesRegexp(ValueError, bad):
     gradient_checker.compute_gradient(f, [x])
Developer: Wajih-O, Project: tensorflow, Lines: 16, Source: gradient_checker_v2_test.py


Example 7: _gradientTest

  def _gradientTest(self, diags, rhs, dtype=dtypes.float64):

    def reference_matmul(diags, rhs):
      matrix = self._makeTridiagonalMatrix(diags[..., 0, :-1], diags[..., 1, :],
                                           diags[..., 2, 1:])
      return math_ops.matmul(matrix, rhs)

    diags = constant_op.constant(diags, dtype=dtype)
    rhs = constant_op.constant(rhs, dtype=dtype)
    with self.cached_session(use_gpu=True):
      grad_reference, _ = gradient_checker_v2.compute_gradient(
          reference_matmul, [diags, rhs])
      grad_theoretical, grad_numerical = gradient_checker_v2.compute_gradient(
          linalg_impl.tridiagonal_matmul, [diags, rhs])
    self.assertAllClose(grad_theoretical, grad_numerical)
    self.assertAllClose(grad_theoretical, grad_reference)
Developer: aritratony, Project: tensorflow, Lines: 16, Source: tridiagonal_matmul_op_test.py


Example 8: testComplexMul

 def testComplexMul(self):
   c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
   def f(x):
     return c * x
   x_shape = c.shape
   x_dtype = c.dtype
   x = constant_op.constant(_random_complex(x_shape, x_dtype))
   analytical, numerical = gradient_checker.compute_gradient(
       f, [x])
   correct = np.array([[5, 7], [-7, 5]])
   self.assertAllEqual(correct, analytical[0])
   self.assertAllClose(correct, numerical[0], rtol=1e-4)
   x = constant_op.constant(_random_complex(x_shape, x_dtype))
   self.assertLess(
       gradient_checker.max_error(*gradient_checker.compute_gradient(
           f, [x])), 3e-4)
Developer: Wajih-O, Project: tensorflow, Lines: 16, Source: gradient_checker_v2_test.py


Example 9: testAddSimple

 def testAddSimple(self):
   size = (2, 3)
   x1 = constant_op.constant(2.0, shape=size, name="x1")
   x2 = constant_op.constant(3.0, shape=size, name="x2")
   error = gradient_checker.max_error(*gradient_checker.compute_gradient(
       lambda x1: math_ops.add(x1, x2), [x1]))
   tf_logging.info("x1 error = %f", error)
   assert error < 1e-4
Developer: Wajih-O, Project: tensorflow, Lines: 8, Source: gradient_checker_v2_test.py


Example 10: testGradientFloat64

 def testGradientFloat64(self):
   with self.cached_session():
     x_val = [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]
     x = np.asarray(x_val, dtype=np.float64, order="F")
     err = gradient_checker_v2.max_error(
         *gradient_checker_v2.compute_gradient(nn_ops.selu, [x]))
   print("selu (float64) gradient err = ", err)
   self.assertLess(err, 1e-6)
Developer: Wajih-O, Project: tensorflow, Lines: 8, Source: relu_op_test.py


Example 11: testBroadcastingWithGradientChecker

 def testBroadcastingWithGradientChecker(self):
   for dtype in [dtypes.float32, dtypes.float64]:
     with self.cached_session():
       x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
       x2 = np.array([2], dtype=dtype.as_numpy_dtype)
       err = gradient_checker_v2.max_error(
           *gradient_checker_v2.compute_gradient(
               lambda x: math_ops.nextafter(x, x2), [x1]))  # pylint: disable=cell-var-from-loop
       self.assertLess(err, 1e-3)
Developer: adit-chandra, Project: tensorflow, Lines: 9, Source: math_grad_test.py


Example 12: testGradientFloat32

 def testGradientFloat32(self):
   with self.cached_session():
     x = np.asarray(
         [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
         dtype=np.float32,
         order="F")
     err = gradient_checker_v2.max_error(
         *gradient_checker_v2.compute_gradient(nn_ops.leaky_relu, [x]))
   print("leaky_relu (float32) gradient err = ", err)
   self.assertLess(err, 1e-4)
Developer: Wajih-O, Project: tensorflow, Lines: 10, Source: relu_op_test.py


Example 13: testAddCustomized

 def testAddCustomized(self):
   size = (2, 3)
   x1 = constant_op.constant(
       2.0, shape=size, dtype=dtypes.float64, name="x1")
   x2 = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
    # checking gradients for x2 using a special delta
   error = gradient_checker.max_error(*gradient_checker.compute_gradient(
       lambda x2: math_ops.add(x1, x2),
       [x2], delta=1e-2))
   tf_logging.info("x2 error = %f", error)
   assert error < 1e-10
Developer: Wajih-O, Project: tensorflow, Lines: 11, Source: gradient_checker_v2_test.py


Example 14: testAddSimple

 def testAddSimple(self):
   # if context.executing_eagerly():
   #   return
   np.random.seed(1)  # Fix seed to avoid flakiness
   size = (2, 3)
   x1 = constant_op.constant(2.0, shape=size, name="x1")
   x2 = constant_op.constant(3.0, shape=size, name="x2")
   error = gradient_checker.max_error(*gradient_checker.compute_gradient(
       lambda x1: math_ops.add(x1, x2), [x1]))
   tf_logging.info("x1 error = %f", error)
   assert error < 1e-4
Developer: aeverall, Project: tensorflow, Lines: 11, Source: gradient_checker_v2_test.py


Example 15: testGradGrad

  def testGradGrad(self):

    def f(x):
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = math_ops.square(x)
        z = math_ops.square(y)
      return tape.gradient(z, x)

    analytical, numerical = gradient_checker.compute_gradient(f, [2.0])
    self.assertAllEqual([[[48.]]], analytical)
    self.assertAllClose([[[48.]]], numerical, rtol=1e-4)
Developer: Wajih-O, Project: tensorflow, Lines: 12, Source: gradient_checker_v2_test.py
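The expected value 48 is just the second derivative worked out by hand:

\[
z = (x^2)^2 = x^4, \qquad f(x) = \frac{dz}{dx} = 4x^3, \qquad f'(x) = 12x^2, \qquad f'(2) = 48.
\]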


Example 16: Test

  def Test(self):
    np.random.seed(1)
    n = shape_[-1]
    batch_shape = shape_[:-2]
    np_dtype = dtype_.as_numpy_dtype

    def RandomInput():
      a = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        a += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      a += np.conj(a.T)
      a = np.tile(a, batch_shape + (1, 1))
      return a

    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    epsilon = np.finfo(np_dtype).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
    # tolerance obtained by looking at actual differences using
    # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
    # after discarding one random input sample
    _ = RandomInput()
    if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
      tol = 1e-2
    else:
      tol = 1e-7
    with self.session(use_gpu=True):
      def Compute(x):
        e, v = linalg_ops.self_adjoint_eig(x)
        # (complex) Eigenvectors are only unique up to an arbitrary phase
        # We normalize the vectors such that the first component has phase 0.
        top_rows = v[..., 0:1, :]
        if dtype_.is_complex:
          angle = -math_ops.angle(top_rows)
          phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
        else:
          phase = math_ops.sign(top_rows)
        v *= phase
        return e, v

      if compute_v_:
        funcs = [lambda x: Compute(x)[0], lambda x: Compute(x)[1]]
      else:
        funcs = [linalg_ops.self_adjoint_eigvals]

      for f in funcs:
        theoretical, numerical = gradient_checker_v2.compute_gradient(
            f,
            [RandomInput()],
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Developer: Wajih-O, Project: tensorflow, Lines: 52, Source: self_adjoint_eig_op_test.py
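The phase normalization inside Compute matters because an eigenvector of a Hermitian matrix is determined only up to a unit phase:

\[
A v = \lambda v \;\Rightarrow\; A\!\left(e^{i\theta} v\right) = \lambda\!\left(e^{i\theta} v\right) \quad \text{for all } \theta \in \mathbb{R}.
\]

Fixing the phase so that the first component of each eigenvector has phase 0 makes the map from matrix to eigenvectors a well-defined function, so the analytic and finite-difference Jacobians can be compared meaningfully.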


Example 17: testGather

 def testGather(self):
   def f(params):
     index_values = [1, 3]
     indices = constant_op.constant(index_values, name="i")
     return array_ops.gather(params, indices, name="y")
   p_shape = (4, 2)
   p_size = 8
   params = constant_op.constant(
        np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
   error = gradient_checker.max_error(*gradient_checker.compute_gradient(
       f, [params]))
   tf_logging.info("gather error = %f", error)
   assert error < 1e-4
Developer: Wajih-O, Project: tensorflow, Lines: 13, Source: gradient_checker_v2_test.py


Example 18: testComplexAbsGradGrad

  def testComplexAbsGradGrad(self):

    def f(x):
      real = math_ops.cos(x)
      imag = ops.convert_to_tensor(1.)
      return math_ops.abs(math_ops.complex(real, imag))

    def g(x):
      with backprop.GradientTape() as t:
        t.watch(x)
        y = f(x)
      return t.gradient(y, x)

    err = gradient_checker_v2.max_error(
        *gradient_checker_v2.compute_gradient(g, [ops.convert_to_tensor(2.0)]))
    self.assertLess(err, 1e-3)
Developer: aritratony, Project: tensorflow, Lines: 16, Source: cwise_ops_unary_test.py


Example 19: testNestedGather

 def testNestedGather(self):
   def f(params):
     index_values = [1, 3, 5, 6]
     indices = constant_op.constant(index_values, name="i")
     y = array_ops.gather(params, indices, name="y")
     index_values2 = [0, 2]
     indices2 = constant_op.constant(index_values2, name="i2")
     return array_ops.gather(y, indices2, name="y2")
   p_shape = (8, 2)
   p_size = 16
   params = constant_op.constant(
        np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
   error = gradient_checker.max_error(*gradient_checker.compute_gradient(
       f, [params]))
   tf_logging.info("nested gather error = %f", error)
   assert error < 1e-4
Developer: Wajih-O, Project: tensorflow, Lines: 16, Source: gradient_checker_v2_test.py


Example 20: _checkGrad

 def _checkGrad(self, x_in, y_in, adjoint_a, adjoint_b):
   x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
   y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
   x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
   y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
   epsilon = np.finfo(x.dtype).eps
   delta = epsilon**(1.0 / 3.0)
   def Loss(x, y):
     z = math_ops.matmul(x, y, adjoint_a, adjoint_b)
     return math_ops.reduce_sum(z)
   with self.cached_session(use_gpu=True):
     ((x_jacob_t, y_jacob_t),
      (x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(
          Loss, [x, y], delta=delta)
     tol = 20 * delta
     self.assertAllClose(x_jacob_t, x_jacob_n, rtol=tol, atol=tol)
     self.assertAllClose(y_jacob_t, y_jacob_n, rtol=tol, atol=tol)
Developer: Wajih-O, Project: tensorflow, Lines: 17, Source: batch_matmul_op_test.py



Note: The tensorflow.python.ops.gradient_checker_v2.compute_gradient examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not republish without permission.

