Python gen_array_ops.broadcast_gradient_args Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.gen_array_ops.broadcast_gradient_args. If you are wondering what broadcast_gradient_args does, how to call it, or what real-world uses look like, the curated examples below should help.



The following presents 20 code examples of the broadcast_gradient_args function, ordered by popularity by default.
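Before diving in, here is a minimal sketch of what broadcast_gradient_args computes (a TensorFlow 2.x eager environment is assumed; the shapes are made up for illustration). Given the shapes of two broadcast-compatible inputs, it returns the axes along which each input was broadcast, i.e. the reduction axes needed to sum a gradient back down to each input's original shape:

import tensorflow as tf
from tensorflow.python.ops import gen_array_ops

sx = tf.constant([2, 3, 5])  # shape of x
sy = tf.constant([1, 3, 1])  # shape of y, broadcast against x
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
print(rx.numpy())  # []    -- x was not broadcast along any axis
print(ry.numpy())  # [0 2] -- y was broadcast along axes 0 and 2

Nearly every gradient function below follows the same pattern: compute rx/ry once, reduce_sum the incoming gradient over those axes, and reshape back to the input shape.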

Example 1: _ComplexGrad

def _ComplexGrad(op, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 9, Source: math_grad.py


Example 2: _BroadcastToGrad

def _BroadcastToGrad(op, grad):
  input_value = op.inputs[0]
  broadcast_shape = op.inputs[1]
  input_value_shape = array_ops.shape(input_value)
  _, reduction_axes = gen_array_ops.broadcast_gradient_args(broadcast_shape,
                                                            input_value_shape)
  updates_grad_reshaped = math_ops.reduce_sum(grad,
                                              axis=reduction_axes,
                                              keepdims=True)
  updates_grad = array_ops.reshape(updates_grad_reshaped, input_value_shape)
  return [updates_grad, None]
Developer: adit-chandra, Project: tensorflow, Lines: 11, Source: array_grad.py
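As a sanity check of the rule above, a hedged sketch (TF 2.x eager execution assumed) showing that the gradient of tf.broadcast_to sum-reduces over the broadcast axes:

import tensorflow as tf

x = tf.Variable([1.0, 2.0, 3.0])       # shape [3]
with tf.GradientTape() as tape:
  y = tf.broadcast_to(x, [4, 3])       # broadcast along a new leading axis
  loss = tf.reduce_sum(y)
print(tape.gradient(loss, x).numpy())  # [4. 4. 4.] -- summed over axis 0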


Example 3: _SubGrad

def _SubGrad(op, grad):
  """Gradient for Sub."""
  x = op.inputs[0]
  y = op.inputs[1]
  if (isinstance(grad, ops.Tensor) and
      _ShapesFullySpecifiedAndEqual(x, y, grad)):
    return grad, -grad
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
          array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 12, Source: math_grad.py
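To make the broadcast-reduce-reshape pattern concrete, a small sketch with hypothetical shapes (x is [2, 3], y is [3]; TF 2.x eager execution assumed):

import tensorflow as tf
from tensorflow.python.ops import array_ops, gen_array_ops, math_ops

grad = tf.ones([2, 3])                 # upstream gradient, shaped like x - y
sx, sy = tf.constant([2, 3]), tf.constant([3])
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)   # shape [2, 3]
gy = array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy)  # shape [3]
print(gx.shape, gy.shape)              # (2, 3) (3,)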


Example 4: _DivGrad

def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(grad * math_ops.div(math_ops.div(-x, y), y),
                                  ry), sy))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 13, Source: math_grad.py


Example 5: _FloorModGrad

def _FloorModGrad(op, grad):
  """Returns grad * (1, -floor(x/y))."""
  x = math_ops.conj(op.inputs[0])
  y = math_ops.conj(op.inputs[1])

  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  floor_xy = math_ops.floor_div(x, y)
  gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
  return gx, gy
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 13, Source: math_grad.py


Example 6: _SquaredDifferenceGrad

def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  with ops.control_dependencies([grad]):
    # The parens ensure that if grad is IndexedSlices, it is multiplied by a
    # Tensor (not a plain number like 2.0) and thus converted to a Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 13, Source: math_grad.py


Example 7: _XDivyGrad

def _XDivyGrad(op, grad):
  """Returns gradient of xdivy(x, y) with respect to x and y."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  with ops.control_dependencies([grad]):
    not_zero_x = math_ops.cast(
        math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)
    partial_x = gen_math_ops.xdivy(not_zero_x, y)
    partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)
    return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),
            array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))
Developer: aeverall, Project: tensorflow, Lines: 14, Source: math_grad.py


Example 8: _NextAfterGrad

def _NextAfterGrad(op, grad):
  """Returns gradient of nextafter(x1, x2) with respect to x1 and x2."""
  x1 = op.inputs[0]
  x2 = op.inputs[1]
  s_x1 = array_ops.shape(x1)
  s_x2 = array_ops.shape(x2)
  r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)
  with ops.control_dependencies([grad]):
    partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)
    partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)
    return (array_ops.reshape(
        math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),
            array_ops.reshape(
                math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
Developer: Wajih-O, Project: tensorflow, Lines: 14, Source: math_grad.py


Example 9: _IgammaGrad

def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  unused_ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)

  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
  # TODO(b/36815900): Mark None return values as NotImplemented
  return (None, array_ops.reshape(
      math_ops.reduce_sum(partial_x * grad, rx), sx))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 15, Source: math_grad.py
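The log-space trick matters numerically. A hedged illustration (the values are chosen only to force float32 overflow): computing x**(a-1) * exp(-x) / Gamma(a) directly produces nan, while the equivalent log-space form stays finite:

import tensorflow as tf

a = tf.constant(200.0)
x = tf.constant(180.0)
direct = tf.pow(x, a - 1.0) * tf.exp(-x) / tf.exp(tf.math.lgamma(a))
log_space = tf.exp(-x + (a - 1.0) * tf.math.log(x) - tf.math.lgamma(a))
print(direct.numpy())     # nan -- pow overflows while exp(-x) underflows
print(log_space.numpy())  # ~0.011, finite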


Example 10: _DivNoNanGrad

def _DivNoNanGrad(op, grad):
  """DivNoNan op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(
      math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(
                  grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
                  ry), sy))
Developer: AnishShah, Project: tensorflow, Lines: 15, Source: math_grad.py


Example 11: _IgammaGrad

def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to a and x."""
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)

  with ops.control_dependencies([grad]):
    partial_a = gen_math_ops.igamma_grad_a(a, x)
    # Perform operations in log space before summing, because Gamma(a)
    # and Gamma'(a) can grow large.
    partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x)
                             - math_ops.lgamma(a))
    return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Developer: AnishShah, Project: tensorflow, Lines: 16, Source: math_grad.py


Example 12: _MaximumMinimumGrad

def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(xmask, zeros, grad)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy)
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 16, Source: math_grad.py


Example 13: _ZetaGrad

def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 17, Source: math_grad.py


Example 14: _PolygammaGrad

def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    partial_x = math_ops.polygamma(n + 1, x)
    # TODO(b/36815900): Mark None return values as NotImplemented
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 17, Source: math_grad.py


Example 15: _MulGrad

def _MulGrad(op, grad):
  """The gradient of scalar multiplication."""
  x = op.inputs[0]
  y = op.inputs[1]
  if (isinstance(grad, ops.Tensor) and
      _ShapesFullySpecifiedAndEqual(x, y, grad) and
      grad.dtype in (dtypes.int32, dtypes.float32)):
    return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(
      math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 18, Source: math_grad.py


Example 16: _PowGrad

def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 23, Source: math_grad.py
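As a quick numeric check of the docstring formula (a hedged sketch, TF 2.x eager execution assumed):

import tensorflow as tf

x, y = tf.constant(3.0), tf.constant(2.0)
with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = tf.pow(x, y)
dx, dy = tape.gradient(z, [x, y])
print(dx.numpy())  # 6.0     == y * x**(y - 1)
print(dy.numpy())  # ~9.8875 == z * log(x)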


Example 17: _BetaincGrad

def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # Two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  _, rx = gen_array_ops.broadcast_gradient_args(sa, sx)

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (
      gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
      gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
                           (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (
      None,  # da
      None,  # db
      array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Developer: PuchatekwSzortach, Project: tensorflow, Lines: 24, Source: math_grad.py


Example 18: _reduce_and_reshape_grad

def _reduce_and_reshape_grad(g, t):
  """Returns the gradient, sum-reduced and reshaped to `t`'s shape."""
  shape = array_ops.shape(t)
  g_shape = array_ops.shape(g)
  bcast_dims, _ = gen_array_ops.broadcast_gradient_args(shape, g_shape)
  return array_ops.reshape(math_ops.reduce_sum(g, bcast_dims), shape)
Developer: Ajaycs99, Project: tensorflow, Lines: 6, Source: scaled_softplus.py
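A hedged usage sketch for the helper above (hypothetical shapes; assumes the definition and its imports from the snippet are in scope): collapsing a gradient g of shape [4, 3] back to the shape of a tensor t of shape [3]:

import tensorflow as tf

g = tf.ones([4, 3])   # gradient broadcast over an extra leading axis
t = tf.zeros([3])     # the tensor whose shape we reduce back to
print(_reduce_and_reshape_grad(g, t).shape)  # (3,)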


Example 19: _GetGradientArgs

def _GetGradientArgs(self, xs, ys):
  # Method of a tf.test.TestCase subclass; assumes
  # `from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args`.
  with self.test_session(use_gpu=True) as sess:
    return sess.run(broadcast_gradient_args(xs, ys))
Developer: AndrewTwinz, Project: tensorflow, Lines: 3, Source: basic_gpu_test.py


Example 20: _GetGradientArgs

def _GetGradientArgs(self, xs, ys):
  # Method of a tf.test.TestCase subclass; assumes the same import as above.
  with self.cached_session() as sess:
    return sess.run(broadcast_gradient_args(xs, ys))
Developer: Wajih-O, Project: tensorflow, Lines: 3, Source: bcast_ops_test.py



Note: The tensorflow.python.ops.gen_array_ops.broadcast_gradient_args examples above were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow each project's license. Do not repost without permission.

