Python list_ops.tensor_list_pop_back Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.list_ops.tensor_list_pop_back. If you have been wondering what tensor_list_pop_back does, how to call it, or what real uses of it look like, the curated code examples below should help.



The following presents 20 code examples of the tensor_list_pop_back function, drawn from open-source projects and ordered by popularity.
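Before the examples, here is a minimal, self-contained sketch of the API they exercise: tensor_list_pop_back takes a TensorList handle and an element_dtype and returns a pair (new_list, last_element). The snippet assumes TensorFlow 2.x running in eager mode and imports the same internal modules (constant_op, dtypes, list_ops) that the examples use; because these are internal APIs, import paths and behavior may differ between TensorFlow versions.

# Minimal sketch (assumes TF 2.x eager execution; internal APIs may change).
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops

# Build a TensorList of scalars from a dense tensor.
t = constant_op.constant([1.0, 2.0, 3.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])

# Each pop returns a new list value plus the element that was removed;
# TensorList ops are functional, so nothing is mutated in place.
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
print(e.numpy())  # 3.0
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
print(e.numpy())  # 2.0
print(list_ops.tensor_list_length(l).numpy())  # 1

Popping from an empty list raises an InvalidArgumentError ("Trying to pop from an empty list"), as Example 14 below demonstrates.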

Example 1: testTensorListFromTensor

 def testTensorListFromTensor(self):
   t = constant_op.constant([1.0, 2.0])
   l = list_ops.tensor_list_from_tensor(t, element_shape=[])
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), 2.0)
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), 1.0)
   self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)
Developer: aeverall | Project: tensorflow | Lines: 8 | Source: list_ops_test.py


Example 2: testUnknownShape

 def testUnknownShape(self):
   l = list_ops.empty_tensor_list(
       element_dtype=dtypes.float32, element_shape=None)
   l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
   l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), [1.0, 2.0])
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), 1.0)
Developer: aeverall | Project: tensorflow | Lines: 9 | Source: list_ops_test.py


Example 3: testListFromTensor

 def testListFromTensor(self):
   with self.cached_session(), self.test_scope():
     t = constant_op.constant([1.0, 2.0])
     l = list_ops.tensor_list_from_tensor(t, element_shape=[])
     e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
     self.assertAllEqual(e, 1.0)
     l, e0 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(e0, 2.0)
     l, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(e1, 1.0)
     self.assertAllEqual(list_ops.tensor_list_length(l), 0)
Developer: jackd | Project: tensorflow | Lines: 11 | Source: tensor_list_ops_test.py


Example 4: testAccumulatorElementShape

  def testAccumulatorElementShape(self, shape):

    def MatchShape(actual_tensor_shape):
      # Compare the shapes, treating None dimensions as equal. We do not
      # directly check actual_tensor_shape and tf.TensorShape(shape) for
      # equality because tf.Dimension.__eq__ returns None if either dimension is
      # None.
      if shape is None:
        self.assertIsNone(actual_tensor_shape.dims)
      else:
        self.assertListEqual(actual_tensor_shape.as_list(), shape)

    def GetAccumulatorForInputAtIndex(while_op, idx):
      body_graph = while_v2._get_graph(while_op, "body")
      y_input_t = body_graph.inputs[idx]
      push_back_node = [c for c in y_input_t.consumers()
                        if c.type == "TensorListPushBack"][0]
      output_idx = body_graph.outputs.index(push_back_node.outputs[0])
      return while_op.outputs[output_idx]

    x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
    y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

    # Forward pass.
    ret = while_loop_v2(lambda v, u: v < 8.,
                        lambda v, u: (math_ops.pow(v, u), u),
                        [x, y],
                        return_same_structure=True)
    while_op = ret[0].op.inputs[0].op
    # Gradient pass.
    grad = gradients_impl.gradients(ret[0], x)
    # Note: There is an Identity b/w grad[0] and the While op.
    grad_while_op = grad[0].op.inputs[0].op

    # Get the TensorList output of While op containing the accumulated values
    # of y.
    x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0]
    output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
    _, val = list_ops.tensor_list_pop_back(output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)

    # Take second derivative to generate intermediate grad_while_op outputs
    gradients_impl.gradients(grad, x)

    # Get the TensorList output of gradient While op containing the accumulated
    # values of grad_x (note that grad_x is needed by the second derivative).
    # grad_while_op.inputs:
    grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
    grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
                                                grad_output_index)
    _, val = list_ops.tensor_list_pop_back(grad_output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
Developer: adit-chandra | Project: tensorflow | Lines: 54 | Source: while_v2_test.py


Example 5: testPushPop

 def testPushPop(self):
   with self.cached_session() as sess, self.test_scope():
     num = array_ops.placeholder(dtypes.int32)
     l = list_ops.tensor_list_reserve(
         element_shape=(7, 15), num_elements=num, element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(1.0, shape=(7, 15)))
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(2.0, shape=(7, 15)))
     l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e2, {num: 10}), 2.0 * np.ones((7, 15)))
     self.assertAllEqual(sess.run(e1, {num: 10}), 1.0 * np.ones((7, 15)))
Developer: Ajaycs99 | Project: tensorflow | Lines: 13 | Source: tensor_list_ops_test.py


Example 6: testPushPop

 def testPushPop(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.empty_tensor_list(
         element_shape=(7, 15),
         element_dtype=dtypes.float32,
         max_num_elements=10)
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(1.0, shape=(7, 15)))
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(2.0, shape=(7, 15)))
     l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e2), 2.0 * np.ones((7, 15)))
     self.assertAllEqual(sess.run(e1), 1.0 * np.ones((7, 15)))
Developer: jackd | Project: tensorflow | Lines: 14 | Source: tensor_list_ops_test.py


Example 7: testAccumulatorElementShape

  def testAccumulatorElementShape(self, shape):

    def MatchShape(actual_tensor_shape):
      # Compare the shapes, treating None dimensions as equal. We do not
      # directly check actual_tensor_shape and tf.TensorShape(shape) for
      # equality because tf.Dimension.__eq__ returns None if either dimension is
      # None.
      if shape is None:
        self.assertIsNone(actual_tensor_shape.dims)
      else:
        self.assertListEqual(actual_tensor_shape.as_list(), shape)

    def GetAccumulatorForInputAtIndex(while_op, idx):
      body_graph = while_v2._get_body_graph(while_op)
      y_input_t = body_graph.inputs[idx]
      push_back_node = [c for c in y_input_t.consumers()
                        if c.type == "TensorListPushBack"][0]
      output_idx = body_graph.outputs.index(push_back_node.outputs[0])
      return while_op.outputs[output_idx]

    x = constant_op.constant(2.)
    y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

    # Forward pass.
    ret = while_loop_v2(
        lambda v, u: v < 8.,
        lambda v, u: (v * v, u), [x, y],
        return_same_structure=False)
    while_op = ret[0].op.inputs[0].op
    # Get the TensorList output of While op containing the accumulated values
    # of y.
    # while_op.inputs: [counter_arg, x_arg, y_arg, *accumulators]
    output = GetAccumulatorForInputAtIndex(while_op, 2)
    _, val = list_ops.tensor_list_pop_back(output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)

    # Gradient pass.
    grad = gradients_impl.gradients(ret[1], y)
    grad_while_op = grad[0].op.inputs[0].op
    # Get the TensorList output of gradient While op containing the accumulated
    # values of grad_y.
    # grad_while_op.inputs:
    # [counter_arg, total_iters_arg, grad_x_arg, grad_y_arg, *other_args]
    grad_output = GetAccumulatorForInputAtIndex(grad_while_op, 3)
    _, val = list_ops.tensor_list_pop_back(grad_output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
Developer: aeverall | Project: tensorflow | Lines: 48 | Source: while_v2_test.py


Example 8: testSerialize

  def testSerialize(self):
    # pylint: disable=g-import-not-at-top
    try:
      import portpicker
    except ImportError:
      return
    with context.graph_mode():
      worker_port = portpicker.pick_unused_port()
      ps_port = portpicker.pick_unused_port()
      cluster_dict = {
          "worker": ["localhost:%s" % worker_port],
          "ps": ["localhost:%s" % ps_port]
      }
      cs = server_lib.ClusterSpec(cluster_dict)

      worker = server_lib.Server(
          cs, job_name="worker", protocol="grpc", task_index=0, start=True)
      unused_ps = server_lib.Server(
          cs, job_name="ps", protocol="grpc", task_index=0, start=True)
      with ops.Graph().as_default(), session.Session(target=worker.target):
        with ops.device("/job:worker"):
          t = constant_op.constant([[1.0], [2.0]])
          l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
        with ops.device("/job:ps"):
          l_ps = array_ops.identity(l)
          l_ps, e = list_ops.tensor_list_pop_back(
              l_ps, element_dtype=dtypes.float32)
        with ops.device("/job:worker"):
          worker_e = array_ops.identity(e)
        self.assertAllEqual(worker_e.eval(), [2.0])
Developer: andrewharp | Project: tensorflow | Lines: 30 | Source: list_ops_test.py


Example 9: testPushPopSeparateLists

 def testPushPopSeparateLists(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.empty_tensor_list(
         element_shape=[],
         element_dtype=dtypes.float32,
         max_num_elements=20)
     l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
     l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
     l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
     _, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     result = sess.run([e11, [e21, e22], [e31, e32]])
     self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
Developer: jackd | Project: tensorflow | Lines: 16 | Source: tensor_list_ops_test.py


Example 10: testCPUGPUCopy

 def testCPUGPUCopy(self):
   if not context.num_gpus():
     return
   t = constant_op.constant([1.0, 2.0])
   l = list_ops.tensor_list_from_tensor(t, element_shape=[])
   with context.device("gpu:0"):
     l_gpu = array_ops.identity(l)
     self.assertAllEqual(
         self.evaluate(
             list_ops.tensor_list_pop_back(
                 l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
   l_cpu = array_ops.identity(l_gpu)
   self.assertAllEqual(
       self.evaluate(
           list_ops.tensor_list_pop_back(
               l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
Developer: aeverall | Project: tensorflow | Lines: 16 | Source: list_ops_test.py


Example 11: _testPushPop

 def _testPushPop(self, max_num_elements):
   l = list_ops.empty_tensor_list(
       element_dtype=dtypes.float32,
       element_shape=[],
       max_num_elements=max_num_elements)
   l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
   l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
   self.assertAllEqual(self.evaluate(e), 1.0)
Developer: aeverall | Project: tensorflow | Lines: 8 | Source: list_ops_test.py


Example 12: testPushPopSeparateLists

 def testPushPopSeparateLists(self):
   with self.cached_session() as sess, self.test_scope():
     num = array_ops.placeholder(dtypes.int32)
     l = list_ops.tensor_list_reserve(
         element_shape=scalar_shape(),
         num_elements=num,
         element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
     l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
     l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
     _, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
     l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
     result = sess.run([e11, [e21, e22], [e31, e32]], {num: 20})
     self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
Developer: Ajaycs99 | Project: tensorflow | Lines: 17 | Source: tensor_list_ops_test.py


Example 13: testEmptyTensorListMax

 def testEmptyTensorListMax(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.empty_tensor_list(
         element_shape=(10, 15), element_dtype=dtypes.float32,
         max_num_elements=2)
     l = list_ops.tensor_list_push_back(
         l, array_ops.fill(value=3.0, dims=(10, 15)))
     _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e), 3.0 * np.ones((10, 15)))
Developer: jackd | Project: tensorflow | Lines: 9 | Source: tensor_list_ops_test.py


Example 14: testPopFromEmptyTensorListFails

 def testPopFromEmptyTensorListFails(self, max_num_elements):
   l = list_ops.empty_tensor_list(
       element_dtype=dtypes.float32,
       element_shape=[],
       max_num_elements=max_num_elements)
   with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                "Trying to pop from an empty list"):
     l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.evaluate(l)
Developer: aeverall | Project: tensorflow | Lines: 9 | Source: list_ops_test.py


Example 15: testDoNotConstantFoldVariants

 def testDoNotConstantFoldVariants(self):
   with self.cached_session() as sess, self.test_scope():
     val = array_ops.placeholder(dtype=dtypes.float32)
     l = list_ops.empty_tensor_list(
         element_shape=(7, 15),
         element_dtype=dtypes.float32,
         max_num_elements=10)
     # Note: Pushing a Placeholder will force the constant folding code
     # to build a Const node with a DT_VARIANT output. This tests that XLA
     # passes a cf_consider_fn which prevent folding such nodes.
     l = list_ops.tensor_list_push_back(
         l, array_ops.fill(value=val, dims=(7, 15)))
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(2.0, shape=(7, 15)))
     l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     _, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     self.assertAllEqual(sess.run(e2, {val: 1.0}), 2.0 * np.ones((7, 15)))
     self.assertAllEqual(sess.run(e1, {val: 1.0}), 1.0 * np.ones((7, 15)))
Developer: jackd | Project: tensorflow | Lines: 18 | Source: tensor_list_ops_test.py


Example 16: testEmptyTensorListNoMax

 def testEmptyTensorListNoMax(self):
   with self.cached_session() as sess, self.test_scope():
     l = list_ops.empty_tensor_list(
         element_shape=(7, 15), element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(1.0, shape=(7, 15)))
     _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                  "Set the max number of elements"):
       self.assertAllEqual(sess.run(e), 1.0 * np.ones((7, 15)))
Developer: jackd | Project: tensorflow | Lines: 10 | Source: tensor_list_ops_test.py


Example 17: testPushPopGradients

 def testPushPopGradients(self):
   with backprop.GradientTape() as tape:
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32, element_shape=[])
     c = constant_op.constant(1.0)
     tape.watch(c)
     l = list_ops.tensor_list_push_back(l, c)
     l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     e = 2 * e
   self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)
Developer: aeverall | Project: tensorflow | Lines: 10 | Source: list_ops_test.py


Example 18: _capture_helper

  def _capture_helper(self, tensor, name):
    if tensor.graph is not self._forward_graph:
      return super(_WhileBodyGradFuncGraph, self)._capture_helper(tensor, name)

    while tensor.op.type == "Identity":
      # We do not accumulate the output of identity nodes so we try to capture
      # the input of the Identity node instead.
      tensor = tensor.op.inputs[0]

    captured_tensor = self._indirect_captures.get(tensor)
    if captured_tensor is not None:
      return captured_tensor

    # Resource tensors are not accumulated and handled specially.
    if tensor.dtype == dtypes.resource:
      return self._resource_capture_helper(tensor)

    # Create or find an existing accumulator output for `tensor` in the forward
    # graph, and fetch from this accumulator in the gradient graph to get the
    # raw intermediate value.
    accumulator = _get_accumulator(tensor)
    if accumulator is None:
      # Create the initial empty tensor list.
      with self._forward_graph.outer_graph.as_default():
        tensor_list = list_ops.empty_tensor_list(
            element_dtype=tensor.dtype, element_shape=tensor.shape,
            max_num_elements=self._maximum_iterations)
      self.empty_tensor_lists.append(tensor_list)

      # Push the intermediate tensor to the tensor list. This captures
      # `tensor_list`.
      with self._forward_graph.as_default():
        accumulator = list_ops.tensor_list_push_back(tensor_list, tensor)
      # Add the modified tensor list to the list of outputs. This output will be
      # all the accumulated values.
      self._forward_graph.outputs.append(accumulator)

      # Capture in the cond graph as well so the forward cond and body inputs
      # match.
      with self._forward_cond_graph.as_default():
        self._forward_cond_graph.capture(tensor_list)

    # Capture the accumulator tensor list in the gradient graph directly from
    # the forward graph -- we'll later modify this to capture the final list
    # output by the forward While op instead.
    captured_accumulator = super(_WhileBodyGradFuncGraph, self)._capture_helper(
        accumulator, name)

    # Pop the intermediate value from the tensor list in the gradient graph.
    new_tensor_list, captured_tensor = list_ops.tensor_list_pop_back(
        captured_accumulator, element_dtype=tensor.dtype)

    self._indirect_captures[tensor] = captured_tensor
    self.popped_tensor_lists[captured_accumulator] = new_tensor_list
    return captured_tensor
Developer: ziky90 | Project: tensorflow | Lines: 55 | Source: while_v2.py


Example 19: testEmptyTensorList

 def testEmptyTensorList(self):
   dim = 7
   with self.cached_session() as sess, self.test_scope():
     p = array_ops.placeholder(dtypes.int32)
     l = list_ops.empty_tensor_list(
         element_shape=(p, 15), element_dtype=dtypes.float32)
     l = list_ops.tensor_list_push_back(
         l, constant_op.constant(1.0, shape=(dim, 15)))
     _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
     with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                  "Use TensorListReserve instead"):
       self.assertEqual(sess.run(e, {p: dim}), 1.0 * np.ones((dim, 15)))
Developer: Ajaycs99 | Project: tensorflow | Lines: 12 | Source: tensor_list_ops_test.py


Example 20: testSerialize

 def testSerialize(self):
   worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
   with ops.Graph().as_default(), session.Session(target=worker.target):
     with ops.device("/job:worker"):
       t = constant_op.constant([[1.0], [2.0]])
       l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
     with ops.device("/job:ps"):
       l_ps = array_ops.identity(l)
       l_ps, e = list_ops.tensor_list_pop_back(
           l_ps, element_dtype=dtypes.float32)
     with ops.device("/job:worker"):
       worker_e = array_ops.identity(e)
     self.assertAllEqual(self.evaluate(worker_e), [2.0])
Developer: aeverall | Project: tensorflow | Lines: 13 | Source: list_ops_test.py



Note: The tensorflow.python.ops.list_ops.tensor_list_pop_back examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use should follow the License of the corresponding project. Do not reproduce without permission.

