Python functional_ops._symbolic_gradient Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.functional_ops._symbolic_gradient. If you are wondering what _symbolic_gradient does, how to call it, and what real-world usage looks like, the curated examples below should help.

Five code examples of _symbolic_gradient are presented, ordered by popularity.
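Before the examples, a minimal end-to-end sketch may help orient readers. It assumes the TF 1.x-era graph API these snippets were written against; the function name "SquareFn" and the Defun decorator usage are illustrative assumptions, not taken from the examples below:

import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops

# Define and register a simple forward function under a known name.
@function.Defun(tf.float32, func_name="SquareFn")
def Square(x):
  return x * x

with tf.Graph().as_default():
  x = tf.constant(3.0)
  y = Square(x)            # adds "SquareFn" to the graph's function library
  dy = tf.constant(1.0)    # gradient flowing in from downstream
  # SymbolicGradient evaluates the gradient function of "SquareFn" at
  # (x, dy); for y = x * x this should yield dx = 2 * x * dy = 6.0.
  dx = functional_ops._symbolic_gradient(
      input=[x, dy], Tout=[tf.float32], f="SquareFn")
  with tf.Session() as sess:
    print(sess.run(dx))    # expect [6.0] (Tout is a list, so a list is returned)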

Example 1: XSquarePlusOneGrad

def XSquarePlusOneGrad(x, dy):
  dx = functional_ops._symbolic_gradient(
      input=[x, dy],
      Tout=[tf.float32],
      # This line relies on define_function having registered the forward
      # function under the name "XSquarePlusOneFn".
      f="XSquarePlusOneFn",
      name="dx")
  return dx
Developer: apollos | Project: tensorflow | Lines: 9 | Source: function_test.py
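The comment in Example 1 presupposes that the forward function has already been registered. A hedged sketch of what that registration might look like (the exact define_function signature varied across early TensorFlow releases; the Defun decorator shown here is the later equivalent, and the body x * x + 1.0 is inferred from the function's name):

from tensorflow.python.framework import function

# Hypothetical forward-function registration; func_name must match the
# f="XSquarePlusOneFn" argument passed to _symbolic_gradient above.
@function.Defun(tf.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
  return x * x + 1.0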


Example 2: _SymGrad

def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
Developer: kdavis-mozilla | Project: tensorflow | Lines: 12 | Source: gradients_impl.py
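Unlike the string form used in the other examples, _SymGrad passes f as a NameAttrList proto, which lets it forward the call node's attrs (for instance the dtype attrs of a polymorphic function) to the SymbolicGradient op. A minimal, illustrative construction (the function name "Foo" and the "T" attr are assumptions for the sketch):

from tensorflow.core.framework import attr_value_pb2, types_pb2

f = attr_value_pb2.NameAttrList()
f.name = "Foo"                         # the function to differentiate
f.attr["T"].type = types_pb2.DT_FLOAT  # attrs mirror those of the call node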


Example 3: testSymGradShape

def testSymGradShape(self):
  g = tf.Graph()
  with g.as_default():
    x = tf.placeholder(tf.float32, [25, 4])
    y = tf.placeholder(tf.float32, [200, 100])
    dz = tf.placeholder(tf.float32, [1])
    # We assume Foo is a function of (x, y) -> (z). Then, Foo's
    # gradient function is (x, y, dz) -> (dx, dy).  dx's shape
    # should be the same as x's; and dy's shape should be the same
    # as y's.
    dx, dy = functional_ops._symbolic_gradient(
        input=[x, y, dz], Tout=[tf.float32] * 2, f="Foo")
    self.assertEqual(x.get_shape(), dx.get_shape())
    self.assertEqual(y.get_shape(), dy.get_shape())
Developer: brchiu | Project: tensorflow | Lines: 13 | Source: function_test.py


Example 4: XSquarePlusOneGrad

def XSquarePlusOneGrad(x, dy):
  dx = functional_ops._symbolic_gradient(
      input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
  return dx
Developer: AbhinavJain13 | Project: tensorflow | Lines: 4 | Source: function_test.py


Example 5: gradients


#......... part of the code omitted here .........
    stop_ops = _StopOps(from_ops, pending_count)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)

        grad_fn = None
        # pylint: disable=protected-access
        is_func_call = ops.get_default_graph()._is_function(op.type)
        if not is_func_call and any(
            isinstance(g, ops.Tensor) or g for g in out_grads) and (
                op._id not in stop_ops):
          # pylint: enable=protected-access
          # A grad_fn must be defined, either as a function or as None
          # for ops that do not have gradients.
          try:
            grad_fn = ops.get_gradient_function(op)
          except LookupError:
            raise LookupError(
                "No gradient defined for operation '%s' (op type: %s)" %
                (op.name, op.type))

        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)
        if (grad_fn or is_func_call) and any(
            isinstance(g, ops.Tensor) or g for g in out_grads):
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, ops.Tensor)
                and not out_grad) and _IsFloat(op.outputs[i]):
              # Only floating-point outputs get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              if loop_state:
                out_grads[i] = loop_state.ZerosLike(op, i)
              else:
                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with ops.get_default_graph()._original_op(op):
              # pylint: enable=protected-access
              if is_func_call:
                # For function call ops, we add a 'SymbolicGradient'
                # node to the graph to compute gradients.
                f_in = [x for x in op.inputs] + out_grads
                f_types = [x.dtype for x in op.inputs]
                # pylint: disable=protected-access
                in_grads = _AsList(functional_ops._symbolic_gradient(
                    f_in, f_types, op.type))
                # pylint: enable=protected-access
              else:
                in_grads = _AsList(grad_fn(op, *out_grads))
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len(
                  [x for x in in_grads if x is not None]) > 1:
                in_grads = control_flow_ops.tuple(in_grads)
          logging.vlog(1, "Gradient for '" + op.name + "'")
          def _FilterGrad(x):
            if x is None:
              return False
            if isinstance(x, (list, tuple)):
              return bool(x)
            else:
              return True
          logging.vlog(1, "  in  --> %s",
                       ", ".join([x.name for x in out_grads if _FilterGrad(x)]))
          logging.vlog(1, "  out --> %s",
                       ", ".join([x.name for x in in_grads if _FilterGrad(x)]))
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagates a list of None backwards.
          in_grads = [None] * len(op.inputs)
        for t_in, in_grad in zip(op.inputs, in_grads):
          if in_grad is not None:
            _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)

      # update pending count for the inputs of op.
      # pylint: disable=protected-access
      for x in op.inputs:
        pending_count[x.op._id] -= 1
        ready = (pending_count[x.op._id] == 0)
        if loop_state and not ready:
          ready = (pending_count[x.op._id] > 0 and
                   control_flow_ops.IsLoopSwitch(x.op))
        if ready:
          queue.append(x.op)
      for x in op.control_inputs:
        pending_count[x._id] -= 1
        if pending_count[x._id] == 0:
          queue.append(x)
      # pylint: enable=protected-access
  return [_GetGrad(grads, x) for x in xs]
Developer: 6779660 | Project: tensorflow | Lines: 101 | Source: gradients.py
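To place Example 5 in context: when tf.gradients reaches a call to a function defined with Defun, the is_func_call branch above emits a SymbolicGradient node rather than looking up a per-op gradient function. A hedged usage sketch (TF 1.x-era API assumed; "CubeFn" is an illustrative name):

import tensorflow as tf
from tensorflow.python.framework import function

@function.Defun(tf.float32, func_name="CubeFn")
def Cube(x):
  return x * x * x

with tf.Graph().as_default():
  x = tf.constant(2.0)
  y = Cube(x)
  # tf.gradients goes through the is_func_call branch shown above,
  # adding a SymbolicGradient node for "CubeFn" to the graph.
  dx, = tf.gradients(y, x)
  with tf.Session() as sess:
    print(sess.run(dx))  # expect 3 * x**2 = 12.0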



Note: The tensorflow.python.ops.functional_ops._symbolic_gradient examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their authors, and copyright remains with those authors; consult each project's license before redistributing or using the code. Do not republish without permission.

