Python tape.push_new_tape Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.eager.tape.push_new_tape. If you have been asking what push_new_tape does, how to call it, or what real code that uses it looks like, the curated examples below should help.



The sections below present 18 code examples of the push_new_tape function, sorted by popularity by default.
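
All of the examples share one pattern: push a fresh tape onto the tape stack, run the traced computation, then pop the tape and query it for watched variables or gradients. Below is a minimal sketch of that pattern. Note that tensorflow.python.eager.tape is an internal, unstable API, and the exact signatures (whether push_new_tape returns the tape, whether pop_tape takes it as an argument) differ between the TensorFlow versions these examples were taken from; the sketch assumes a version where push_new_tape returns the tape object, as in Examples 5 and 13.

# A minimal sketch of the push/run/pop pattern shared by the examples below.
# Assumes a TensorFlow version where push_new_tape returns the tape object
# and pop_tape takes it as an argument; older versions differ.
from tensorflow.python.eager import tape


def run_traced(f, *args):
  this_tape = tape.push_new_tape()   # start recording onto a fresh tape
  try:
    result = f(*args)                # ops executed here are recorded
  finally:
    tape.pop_tape(this_tape)         # always unwind the tape stack
  return result, this_tape           # caller can query watched_variables()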

Example 1: grad_fn

  def grad_fn(*args):
    """Computes the gradient of the wrapped function."""
    tape.push_new_tape()
    try:
      end_node = f(*args)
      if end_node is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
    finally:
      popped_tape = tape.pop_tape()
    # Sorting variables by id, which is monotonically increasing in construction
    # order. This ensures unique order across executions.
    variables = list(sorted(popped_tape.watched_variables(),
                            key=lambda v: v.handle._id))  # pylint: disable=protected-access
    sources = [x.handle for x in variables]

    if not sources:
      raise ValueError("No trainable variables were accessed while the "
                       "function was being computed.")
    grad = imperative_grad.imperative_grad(_default_vspace,
                                           popped_tape,
                                           nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Author: autodrive, Project: tensorflow, Lines: 25, Source: backprop.py
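
For orientation: the public tf.GradientTape API is built on this same tape stack (see Examples 9, 10, and 18 below) and exposes what grad_fn computes here, namely gradients with respect to the trainable variables the function touched, without reaching into internal modules. A small usage sketch of the public equivalent:

import tensorflow as tf

v = tf.Variable(3.0)

with tf.GradientTape() as g:   # pushes a new tape, like push_new_tape above
  y = v * v                    # trainable variables are watched automatically

# Gradients w.r.t. the variables the computation accessed.
print(g.gradient(y, [v]))      # [tf.Tensor(6.0, ...)]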


Example 2: decorated

 def decorated(*args, **kwds):
   """Computes the value and gradient of the decorated function."""
   parameter_positions = _get_arg_spec(f, params, args)
   assert not kwds, "The gradient function can't take keyword arguments."
   tape.push_new_tape()
   try:
     sources = []
     args = [
         ops.convert_to_tensor(args[i])
         if i in parameter_positions else args[i]
         for i in range(len(args))
     ]
     args = _ensure_unique_tensor_objects(parameter_positions, args)
     for i in parameter_positions:
       sources.append(args[i])
       tape.watch(args[i])
     result = f(*args)
     if result is None:
       raise ValueError("Cannot differentiate a function that returns None; "
                        "did you forget to return a value from {}?".format(
                            f.__name__))
     flat_result = nest.flatten(result)
     flat_result = [gen_array_ops.identity(x) for x in flat_result]
     result = nest.pack_sequence_as(result, flat_result)
   finally:
     t = tape.pop_tape()
   def vjp(dy=None):
     if dy is not None:
       dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
     return imperative_grad.imperative_grad(
         _default_vspace, t, nest.flatten(result), sources,
         output_gradients=dy)
   return result, vjp
Author: SylChan, Project: tensorflow, Lines: 33, Source: backprop.py
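
The vjp closure returned here replays the tape with caller-supplied output gradients. The public counterpart of its dy argument is the output_gradients parameter of tf.GradientTape.gradient; a small sketch, independent of the internal API above:

import tensorflow as tf

x = tf.constant(2.0)
with tf.GradientTape() as g:
  g.watch(x)                   # constants must be watched explicitly
  y = x * x

# output_gradients plays the role of the `dy` argument to vjp above:
# the result is dy * (dy/dx) = 3.0 * 2x = 12.0
print(g.gradient(y, x, output_gradients=tf.constant(3.0)))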


Example 3: grad_fn

 def grad_fn(*args):
   """Computes the gradient of the wrapped function."""
   tape.push_new_tape()
   end_node = f(*args)
   variables = tape.top_tape_watched_variables()
   sources = [x.handle for x in variables]
   grad = imperative_grad(end_node, sources)
   return end_node, list(zip(grad, variables))
Author: Crazyonxh, Project: tensorflow, Lines: 8, Source: backprop.py


Example 4: grad_fn

  def grad_fn(*args):
    """Computes the gradient of the wrapped function."""
    tape.push_new_tape()
    end_node = f(*args)
    variables = tape.top_tape_watched_variables()
    sources = [x.handle for x in variables]

    if not sources:
      raise ValueError("no trainable variables were accessed while the "
                       "function was being computed.")
    grad = imperative_grad.imperative_grad(_default_vspace,
                                           tape.pop_tape(),
                                           nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Author: DjangoPeng, Project: tensorflow, Lines: 15, Source: backprop.py


Example 5: _defun_internal

def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the graph key, since this is used for matching variables in
    # optimizers.
    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work. This
    # lets the function access (but not mutate) collections of the containing
    # graph, such as the global step and the summary writer collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        this_tape = tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          tape.pop_tape(this_tape)
        variables = this_tape.watched_variables()

        # Returning a closed-over tensor as an output does not trigger a
        # call to convert_to_tensor, so we manually capture all such tensors.
        outputs_list = _flatten(func_outputs)
        func_def_outputs = [
            _convert_to_graph_tensor(x) for x in outputs_list if x is not None
        ]

      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      output_shapes = tuple(
          x.shape if isinstance(x, ops.Tensor) else None
          for x in outputs_list)

  flat_inputs = [x for x in nest.flatten(func_inputs)
                 if isinstance(x, ops.Tensor)]
  all_inputs = flat_inputs + list(extra_placeholders)
  all_ignored_ops = frozenset(x.op for x in all_inputs)
  fname = _inference_name(name)
  operations = tuple(x for x in tmp_graph.get_operations()
                     if x not in all_ignored_ops)
  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  if context.in_eager_mode():
    for f in tmp_graph._functions.values():  # pylint: disable=protected-access
      # TODO(ashankar): What about the gradient registry?
      _register(f._c_func)  # pylint: disable=protected-access
  return GraphModeFunction(
      fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
      func_outputs, output_shapes, variables)
Author: ChengYuXiang, Project: tensorflow, Lines: 60, Source: function.py
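
This is the machinery behind the old tf.contrib.eager.defun: the Python function is traced into a graph while a tape records which variables it touches, so the resulting GraphModeFunction can later be differentiated. Its modern descendant is tf.function; a minimal sketch of the equivalent public usage:

import tensorflow as tf

v = tf.Variable(2.0)

@tf.function                   # traces the Python function into a graph,
def scale(x):                  # capturing accessed variables much like the
  return v * x                 # tape-based variable capture above

print(scale(tf.constant(3.0)))   # tf.Tensor(6.0, ...)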


Example 6: decorated

 def decorated(*args, **kwds):
   """Computes the value and gradient of the decorated function."""
   dy = kwds.pop("dy", None)
   if dy is not None:
     dy = ops.convert_to_tensor(dy)
   assert not kwds, "The gradient function can't take keyword arguments."
   tape.push_new_tape()
   sources = []
   args = [ops.convert_to_tensor(x) for x in args]
   for i in parameter_positions:
     sources.append(args[i])
     tape.watch(args[i])
   result = f(*args)
   return result, imperative_grad(
       result,
       sources,
       output_gradients=dy)
Author: 1000sprites, Project: tensorflow, Lines: 17, Source: backprop.py


Example 7: grad_fn

 def grad_fn(*args, **kwds):
   """Computes the gradient of the wrapped function."""
   tape.push_new_tape()
   end_node = f(*args)
   start_node = tape.pop_tape()
   ag_core.active_progenitors.remove(start_node)
   if not ag_core.isnode(end_node):
     raise ValueError(
         "Target not part of a computation being traced. %s" % end_node)
   if start_node not in end_node.progenitors:
     raise ValueError("Target not derived from source. %s %s" %
                      (end_node.progenitors, repr(start_node)))
   output_gradients = kwds.get("output_gradients", None)
   if output_gradients is None:
     output_gradients = _ones(end_node.shape, end_node.dtype)
   grad = ag_core.backward_pass(output_gradients, end_node, start_node)
   return end_node.value, _aggregate_grads(grad.gradients)
Author: keveman, Project: tensorflow, Lines: 17, Source: backprop.py


Example 8: testTapeGC

  def testTapeGC(self):
    # TODO(apassos) figure out how to test this without using tape internal
    # APIs.
    tape.push_new_tape()

    def f():
      x = constant_op.constant(1.0)
      tape.watch(x)
      x = gradient_is_constant(x)
      x = gradient_is_constant(x)
      x = gradient_is_constant(x)

    f()
    t = tape.pop_tape()
    tensor_tape, op_tape = t.export()
    self.assertEqual(len(tensor_tape), 1)  # The watched tensor will remain on
                                           # the tape
    self.assertEqual(len(op_tape), 0)  # No operations should remain on the tape
Author: DjangoPeng, Project: tensorflow, Lines: 18, Source: tape_test.py


Example 9: _push_tape

 def _push_tape(self, existing_tape=False):
   if self._recording:
     raise ValueError("Tape is already recording.")
   if existing_tape:
     if self._tape is None:
       raise ValueError("There is no existing tape.")
     tape.push_tape(self._tape)
   else:
     self._tape = tape.push_new_tape(persistent=self._persistent)
   self._recording = True
Author: Eagle732, Project: tensorflow, Lines: 10, Source: backprop.py


Example 10: _push_tape

 def _push_tape(self):
   if self._recording:
     raise ValueError("Tape is already recording.")
   if self._tape is None:
     self._tape = tape.push_new_tape(
         persistent=self._persistent,
         watch_accessed_variables=self._watch_accessed_variables)
   else:
     tape.push_tape(self._tape)
   self._recording = True
Author: adit-chandra, Project: tensorflow, Lines: 10, Source: backprop.py
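
Examples 9 and 10 are the internals of tf.GradientTape itself: a persistent tape is pushed once and re-pushed via push_tape so it can keep recording across multiple uses, and watch_accessed_variables controls automatic variable watching. Both knobs are public constructor arguments; a sketch of the persistent case:

import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as g:   # tape outlives the first .gradient call
  g.watch(x)
  y = x * x
  z = y * y

print(g.gradient(y, x))   # 6.0
print(g.gradient(z, x))   # 108.0 -- only legal because persistent=True
del g                     # release the persistent tape's resources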


Example 11: decorated

 def decorated(*args, **kwds):
   """Computes the value and gradient of the decorated function."""
   dy = kwds.pop("dy", None)
   if dy is not None:
     dy = ops.convert_to_tensor(dy)
   assert not kwds, "The gradient function can't take keyword arguments."
   tape.push_new_tape()
   sources = []
   args = [
       ops.convert_to_tensor(args[i]) if i in parameter_positions else args[i]
       for i in range(len(args))
   ]
   args = _ensure_unique_tensor_objects(parameter_positions, args)
   for i in parameter_positions:
     sources.append(args[i])
     tape.watch(args[i])
   result = f(*args)
   return result, imperative_grad.imperative_grad(
       _default_vspace, nest.flatten(result), sources,
       output_gradients=nest.flatten(dy) if dy is not None else None)
Author: rajeev921, Project: tensorflow, Lines: 20, Source: backprop.py


Example 12: grad_fn

  def grad_fn(*args):
    """Computes the gradient of the wrapped function."""
    tape.push_new_tape()
    try:
      end_node = f(*args)
      if end_node is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
    finally:
      popped_tape = tape.pop_tape()
      variables = popped_tape.watched_variables()
    sources = [x.handle for x in variables]

    if not sources:
      raise ValueError("No trainable variables were accessed while the "
                       "function was being computed.")
    grad = imperative_grad.imperative_grad(_default_vspace,
                                           popped_tape,
                                           nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Author: SylChan, Project: tensorflow, Lines: 22, Source: backprop.py


Example 13: grad_fn

  def grad_fn(*args, **kwds):
    """Computes the gradient of the wrapped function."""
    this_tape = tape.push_new_tape()
    try:
      end_node = f(*args, **kwds)
      if end_node is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
    finally:
      tape.pop_tape(this_tape)
    # Note: variables are returned in construction order. This ensures unique
    # order across executions.
    variables = this_tape.watched_variables()
    if not variables:
      raise ValueError("No trainable variables were accessed while the "
                       "function was being computed.")

    sources = [v.handle for v in variables]
    grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Author: adit-chandra, Project: tensorflow, Lines: 22, Source: backprop.py


Example 14: _defun_internal

def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  container_prefix = ops.get_default_graph()._container_prefix  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the container prefix, since this is used for error checking when
    # isolating eager execution (the container prefix at creation must match the
    # container prefix when used, and variables accessed in the defun will be
    # used in the outside context).
    tmp_graph._container_prefix = container_prefix  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work. This
    # lets the function access (but not mutate) collections of the containing
    # graph, such as the global step and the summary writer collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          variables = tape.pop_tape().watched_variables()
      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      outputs_list = nest.flatten(func_outputs)
      output_shapes = [x.shape for x in outputs_list if x is not None]

  flat_inputs = [
      x for x in nest.flatten(func_inputs) if isinstance(x, ops.Tensor)
  ]
  all_inputs = flat_inputs + list(extra_placeholders)

  func_def_outputs = [x for x in outputs_list if x is not None]
  inference_function_def = make_function_def(
      tmp_graph, tmp_graph.get_operations(), all_inputs, func_def_outputs)
  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  for f in tmp_graph._functions.values():  # pylint: disable=protected-access
    # TODO(ashankar): What about the gradient registry?
    _register_with_name(f.name, f.definition)
  _register_with_name(_inference_name(name), inference_function_def)

  return GraphModeFunction(
      all_inputs,
      extra_inputs,
      inference_function_def,
      tmp_graph,
      tmp_graph.get_operations(),
      func_outputs,
      _map_sequence_obj_to_idx(func_def_outputs),
      output_shapes,
      variables=variables)
Author: TianyouLi, Project: tensorflow, Lines: 61, Source: function.py


Example 15: __enter__

 def __enter__(self):
   self._tape = tape.push_new_tape(persistent=self._persistent)
   return self
Author: andrewharp, Project: tensorflow, Lines: 3, Source: backprop.py


Example 16: __enter__

 def __enter__(self):
   tape.push_new_tape()
   return self
Author: SylChan, Project: tensorflow, Lines: 3, Source: backprop.py
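
Because __enter__ simply pushes onto a stack of tapes, tf.GradientTape contexts nest, which is how higher-order derivatives are taken with the public API. A sketch:

import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as outer:       # outer tape pushed first
  outer.watch(x)
  with tf.GradientTape() as inner:     # inner tape pushed on top of the stack
    inner.watch(x)
    y = x * x * x
  dy = inner.gradient(y, x)            # 3x^2 = 27.0
d2y = outer.gradient(dy, x)            # 6x = 18.0
print(dy, d2y)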


Example 17: func_graph_from_py_func


#......... part of the code is omitted here .........
        convert_structure_to_signature(func_args, arg_names),
        convert_structure_to_signature(func_kwargs))

    # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
    # Variables to help check whether mutation happens in calling the function
    # Copy the recursive list, tuple and map structure, but not base objects
    func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))
    func_kwargs_before = nest.pack_sequence_as(
        func_kwargs, nest.flatten(func_kwargs))

    def convert(x):
      """Converts a function output to a Tensor."""
      if x is None:
        return None
      if op_return_value is not None and isinstance(x, ops.Operation):
        # TODO(b/79881896): we currently can't capture external control deps, so
        # this won't work if x needs to be captured (i.e. if python_func returns
        # captured Operations).
        with ops.control_dependencies([x]):
          x = array_ops.identity(op_return_value)
      elif not isinstance(x, tensor_array_ops.TensorArray):
        try:
          x = ops.convert_to_tensor_or_composite(x)
        except (ValueError, TypeError):
          raise TypeError(
              "To be compatible with tf.contrib.eager.defun, Python functions "
              "must return zero or more Tensors; in compilation of %s, found "
              "return value of type %s, which is not a Tensor." %
              (str(python_func), type(x)))
      if add_control_dependencies:
        x = a.mark_as_return(x)
      return x

    this_tape = tape.push_new_tape()
    try:
      if autograph:
        from tensorflow.python import autograph  # pylint: disable=g-import-not-at-top
        _, original_func = tf_decorator.unwrap(python_func)

        def wrapper(*args, **kwargs):
          # Note: functions annotated with @tf.function should always be
          # converted even though they would meet autograph's whitelisting
          # criteria.
          # If this assumption is ever broken, converted_call will need to
          # handle the possibility of original_func still being a shim, e.g.
          # bound to WeakrefSelf.
          return autograph.converted_call(
              original_func, None,
              autograph.ConversionOptions(
                  verbose=autograph.Verbosity.BRIEF,
                  recursive=True,
                  strip_decorators=(def_function.function,),
                  optional_features=(),
                  force_conversion=True,
              ), *args, **kwargs)

        # Wrapping around a decorator allows checks like tf_inspect.getargspec
        # to be accurate.
        converted_func = tf_decorator.make_decorator(original_func, wrapper)
        tf_decorator.rewrap(python_func, original_func, converted_func)

      func_outputs = python_func(*func_args, **func_kwargs)

      # invariant: `func_outputs` contains only Tensors, IndexedSlices,
      # SparseTensors, TensorArrays and `None`s.
      func_outputs = nest.map_structure(convert, func_outputs)
Author: rmlarsen, Project: tensorflow, Lines: 67, Source: func_graph.py
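
func_graph_from_py_func pushes a tape around the (optionally autograph-converted) user function while tracing it into a FuncGraph; this is the core of tf.function. Autograph conversion is exposed publicly through the autograph flag; a sketch, assuming TF 2.x:

import tensorflow as tf

@tf.function(autograph=True)   # autograph rewrites Python control flow
def count_down(n):             # this while loop becomes a tf.while_loop
  while n > 0:
    n -= 1
  return n

print(count_down(tf.constant(5)))   # tf.Tensor(0, ...)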


Example 18: _push_tape

 def _push_tape(self):
   if self._recording:
     raise ValueError("Tape is already recording.")
   self._tape = tape.push_new_tape(persistent=self._persistent)
   self._recording = True
Author: didukhle, Project: tensorflow, Lines: 5, Source: backprop.py



Note: the tensorflow.python.eager.tape.push_new_tape examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

