Python gen_array_ops._concat_offset Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.python.ops.gen_array_ops._concat_offset function in Python. If you have been wondering how exactly the Python _concat_offset function is used, how to call it, or what real-world usage looks like, the curated code examples below should help.



A total of 10 code examples of the _concat_offset function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
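Before the examples, a quick orientation: _concat_offset takes the concat dimension and the shape vector of each input, and returns, for each input, the offset at which that input starts inside the concatenated result. The following is a minimal pure-Python sketch of that computation (the helper name concat_offset is hypothetical, not the TensorFlow kernel itself); every offset is zero except along the concat dimension, where it is the running sum of the preceding inputs' sizes:

def concat_offset(concat_dim, shapes):
  """Sketch of the behavior of gen_array_ops._concat_offset (assumed, not the real kernel).

  For each input shape, return its start offset within the concatenated
  tensor: zeros everywhere except along `concat_dim`, where the offset is
  the running sum of the preceding inputs' sizes. The real op additionally
  validates that all other dimensions agree.
  """
  offsets = []
  running = 0
  for shape in shapes:
    offset = [0] * len(shape)
    offset[concat_dim] = running
    offsets.append(offset)
    running += shape[concat_dim]
  return offsets

# Matches the expected values in the tests below:
# concat_offset(1, [[2, 3, 5], [2, 7, 5], [2, 20, 5]])
# -> [[0, 0, 0], [0, 3, 0], [0, 10, 0]]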

Example 1: testNegativeDim

  def testNegativeDim(self):
    with self.test_session(use_gpu=True) as sess:
      cdim = constant_op.constant(-2, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5], dtypes.int32)
      s2 = constant_op.constant([2, 20, 5], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
      ans = sess.run(off)
      self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])

      cdim = constant_op.constant(-3, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([1, 3, 5], dtypes.int32)
      s2 = constant_op.constant([3, 3, 5], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
      ans = sess.run(off)
      self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
Developer: 1000sprites | Project: tensorflow | Lines: 17 | Source: concat_op_test.py


Example 2: testDimMismatch

  def testDimMismatch(self):
    with self.test_session() as sess:
      cdim = tf.constant(1, tf.int32)
      s0 = tf.constant([2, 3, 5], tf.int32)
      s1 = tf.constant([2, 7, 5, 10], tf.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   r"should contain 3 elem"):
        sess.run(off)
Developer: BloodD | Project: tensorflow | Lines: 9 | Source: concat_op_test.py


Example 3: testConcatDimOutOfRange

  def testConcatDimOutOfRange(self):
    with self.test_session() as sess:
      cdim = tf.constant(4, tf.int32)
      s0 = tf.constant([2, 3, 5], tf.int32)
      s1 = tf.constant([2, 7, 5], tf.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   r"Concat dim is out of range: 4 vs. 3"):
        sess.run(off)
Developer: BloodD | Project: tensorflow | Lines: 9 | Source: concat_op_test.py


Example 4: testNotVector

  def testNotVector(self):
    with self.test_session() as sess:
      cdim = tf.constant(1, tf.int32)
      s0 = tf.constant([[2, 3, 5]], tf.int32)
      s1 = tf.constant([[2, 7, 5]], tf.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   r"should be a vector"):
        sess.run(off)
Developer: BloodD | Project: tensorflow | Lines: 9 | Source: concat_op_test.py


Example 5: testBasic

  def testBasic(self):
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu) as sess:
        cdim = tf.constant(1, tf.int32)
        s0 = tf.constant([2, 3, 5], tf.int32)
        s1 = tf.constant([2, 7, 5], tf.int32)
        s2 = tf.constant([2, 20, 5], tf.int32)
        off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
        ans = sess.run(off)
        self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
Developer: BloodD | Project: tensorflow | Lines: 10 | Source: concat_op_test.py


Example 6: testBasic

  def testBasic(self):
    with self.test_session() as sess:
      with self.test_scope():
        cdim = constant_op.constant(1, dtypes.int32)
        s0 = constant_op.constant([2, 3, 5], dtypes.int32)
        s1 = constant_op.constant([2, 7, 5], dtypes.int32)
        s2 = constant_op.constant([2, 20, 5], dtypes.int32)
        off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
        ans = sess.run(off)
        self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
Developer: 1000sprites | Project: tensorflow | Lines: 10 | Source: concat_ops_test.py


Example 7: testSizeMismatch

  def testSizeMismatch(self):
    with self.test_session() as sess:
      cdim = tf.constant(1, tf.int32)
      s0 = tf.constant([2, 3, 5], tf.int32)
      s1 = tf.constant([2, 7, 10], tf.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError,
          r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
          r"and doesn't match input 0 with shape \[2 3 5\]."):
        sess.run(off)
Developer: BloodD | Project: tensorflow | Lines: 11 | Source: concat_op_test.py


Example 8: _ConcatGrad

def _ConcatGrad(op, grad):
  """Gradient for concat op."""

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat(0,
                            [array_ops.fill(
                                array_ops.expand_dims(concat_dim, 0), 0),
                             [1],
                             array_ops.fill(
                                 shape_of_shape - concat_dim - 1, 0)])
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return [None, grad]

  concat_dim = op.inputs[0]
  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = array_ops.shape_n(op.inputs[1:])
    # pylint: disable=protected-access
    offset = gen_array_ops._concat_offset(concat_dim, sizes)
    # pylint: enable=protected-access
    for (begin, size) in zip(offset, sizes):
      out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in op.inputs[1:]]
    if concat_dim_static > 0:
      # IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
      # with all the indices, but with grad.values sliced accordingly. This
      # is like the Tensor case, except shape(grad.values)[0] is not equal to
      # shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values,
            begin,
            array_ops.concat(0, [[-1], array_ops.slice(size, [1], [-1])]))
        out_grads.append(
            ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
      # only for the relevant indices.
      start = constant_op.constant(0, dtype=grad.indices.dtype)
      for size in sizes:
        size_concat_dim = array_ops.gather(size, concat_dim)
        if size_concat_dim.dtype != grad.indices.dtype:
          size_concat_dim = math_ops.cast(size_concat_dim,
                                          dtype=grad.indices.dtype)
        end = start + size_concat_dim
        # Compute the 1-D Tensor of indices relevant for this input.
        indices_to_select = array_ops.squeeze(
            array_ops.where(math_ops.logical_and(grad.indices >= start,
                                                 grad.indices < end)),
            squeeze_dims=[1])
        new_indices = array_ops.gather(grad.indices, indices_to_select) - start
        new_values = array_ops.gather(grad.values, indices_to_select)
        out_grads.append(
            ops.IndexedSlices(new_values, new_indices, size))
        start = end
  else:
    raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))

  return [None] + out_grads
Developer: 0ruben | Project: tensorflow | Lines: 79 | Source: array_grad.py
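To make the dense-gradient branch of Example 8 concrete: _ConcatGrad asks _concat_offset where each input starts inside the concatenated tensor and then slices the incoming gradient back apart, one block per input. Below is a small NumPy sketch of that slicing step; the shapes, variable names, and array contents are illustrative assumptions, not taken from the TensorFlow source:

import numpy as np

# Three inputs concatenated along dim 1; the dense gradient has the
# concatenated shape (2, 30, 5).
shapes = [(2, 3, 5), (2, 7, 5), (2, 20, 5)]
grad = np.arange(2 * 30 * 5, dtype=np.float32).reshape(2, 30, 5)

# Offsets as _concat_offset would report them: a running sum of the
# preceding sizes along the concat dimension, zeros elsewhere.
concat_dim = 1
offsets, running = [], 0
for s in shapes:
  off = [0] * len(s)
  off[concat_dim] = running
  offsets.append(off)
  running += s[concat_dim]

# Mirror the slice loop in _ConcatGrad: each input's gradient is the block
# of the dense gradient that starts at its offset and spans its size.
grads_per_input = [
    grad[tuple(slice(b, b + d) for b, d in zip(off, s))]
    for off, s in zip(offsets, shapes)
]
assert [g.shape for g in grads_per_input] == shapes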


Example 9: _ConcatGradHelper

def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.

  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect
      to each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.

  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.

  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat([
        array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
        array_ops.fill(shape_of_shape - concat_dim - 1, 0)
    ], 0)
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    sizes = []
    fully_known = True
    for x in inputs:
      input_shape = array_ops.shape(x)
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      else:
        sizes.append(input_shape)

    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad

  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]
  # Using mod here for convenience since concat_dim is already verified
  # in concat implementation to be within the allowed [-rank, rank) range.
  non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])

  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = _ExtractInputShapes(input_values)
    # The magic number of 16 was found through benchmarking a range of sizes
    # on CPUs and a Maxwell TitanX.  A speedup was seen in a large majority of
    # cases when switching implementations at N=16, but it is possible that
    # there will be a small number of performance regressions.
    # pylint: disable=protected-access
    if len(sizes) > 16:
      # extract the size of each input along the concat dimension
      sizes = array_ops.squeeze(
          array_ops.slice(
              array_ops.stack(
                  sizes, axis=1), [non_neg_concat_dim, 0], [1, -1]))
      out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
    else:
      offset = gen_array_ops._concat_offset(non_neg_concat_dim, sizes)
      for (begin, size) in zip(offset, sizes):
        out_grads.append(array_ops.slice(grad, begin, size))
    # pylint: enable=protected-access
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    if concat_dim_static < 0:
      rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
      if rank is None:
        raise ValueError("Can only compute IndexedSlices gradient with "
                         "negative concat_dim when first value rank is "
                         "statically-known.")
      concat_dim_static %= rank
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
      # gradients with all the indices, but with grad.values sliced accordingly.
      # This is like the Tensor case, except shape(grad.values)[0] is not equal
#......... (remainder of this function omitted) .........
Developer: Jackhuang945 | Project: tensorflow | Lines: 101 | Source: array_grad.py
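Example 9 also adds a fast path: when there are more than 16 inputs (a threshold the in-code comment attributes to benchmarking), it extracts each input's size along the concat dimension and splits the gradient with a single split op instead of issuing one slice per input. A rough NumPy analogue of that path, under the same illustrative shapes as above (np.split stands in for array_ops.split here and is not the TensorFlow call itself):

import numpy as np

shapes = [(2, 3, 5), (2, 7, 5), (2, 20, 5)]
grad = np.zeros((2, 30, 5), dtype=np.float32)
non_neg_concat_dim = 1

# Size of each input along the concat dimension: [3, 7, 20].
sizes_along_dim = [s[non_neg_concat_dim] for s in shapes]
# np.split expects cumulative split points rather than section sizes.
split_points = np.cumsum(sizes_along_dim)[:-1]
grads_per_input = np.split(grad, split_points, axis=non_neg_concat_dim)
assert [g.shape for g in grads_per_input] == shapes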


Example 10: _ConcatGradHelper

def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.

  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect
      to each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.

  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.

  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[dim_index])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat_v2(
        [array_ops.fill(
            array_ops.expand_dims(concat_dim, 0), 0),
         [1],
         array_ops.fill(
             shape_of_shape - concat_dim - 1, 0)],
        0)
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    sizes = []
    fully_known = True
    for x in inputs:
      input_shape = array_ops.shape(x)
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      else:
        sizes.append(input_shape)

    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad

  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]
  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = _ExtractInputShapes(input_values)
    # The following line to be enabled once ready
    # if len(sizes) > 16:
    # sizes = array_ops.squeeze(array_ops.slice(
    # array_ops.pack(sizes, axis=1), [concat_dim, 0], [1, -1]))
    # out_grads = array_ops.split_v(grad, sizes, concat_dim)
    # else:
    # pylint: disable=protected-access
    offset = gen_array_ops._concat_offset(concat_dim, sizes)
    # pylint: enable=protected-access
    for (begin, size) in zip(offset, sizes):
      out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
      # with all the indices, but with grad.values sliced accordingly. This
      # is like the Tensor case, except shape(grad.values)[0] is not equal to
      # shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values,
            begin,
            array_ops.concat_v2(
                [[-1], array_ops.slice(size, [1], [-1])], 0))
        out_grads.append(
            ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
#......... (remainder of this function omitted) .........
Developer: Hwhitetooth | Project: tensorflow | Lines: 101 | Source: array_grad.py



Note: The tensorflow.python.ops.gen_array_ops._concat_offset function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to each project's License before redistributing or using the code; do not republish without permission.

