Python array_ops.reverse_sequence Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.reverse_sequence. If you have been wondering what reverse_sequence does, how to call it, or what it looks like in real code, the curated examples below should help.



The following shows 14 code examples of the reverse_sequence function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
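
To orient the reader before the examples, here is a minimal sketch of what the op computes, written against the public TF 2.x API (tf.reverse_sequence, the public counterpart of array_ops.reverse_sequence); the commented output follows directly from the documented semantics:

import tensorflow as tf

x = tf.constant([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
# For each batch row i, reverse only the first seq_lengths[i] elements
# along seq_axis; everything past that length stays in place.
y = tf.reverse_sequence(x, seq_lengths=[3, 2], seq_axis=1, batch_axis=0)
print(y.numpy())
# [[3 2 1 4]
#  [6 5 7 8]]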

Example 1: _ReverseSequenceGrad

def _ReverseSequenceGrad(op, grad):
  # The gradient of ReverseSequence is the incoming gradient reversed
  # with the same seq_lengths (reversing is its own inverse).
  seq_lengths = op.inputs[1]
  return [array_ops.reverse_sequence(grad,
                                     batch_dim=op.get_attr("batch_dim"),
                                     seq_dim=op.get_attr("seq_dim"),
                                     seq_lengths=seq_lengths),
          None]
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: array_grad.py
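
The gradient can be another reverse_sequence because reversing up to the same lengths is an involution: applying the op twice with identical seq_lengths restores the input. A quick sanity check of that property, sketched with the TF 2.x public API (not part of the quoted source):

import tensorflow as tf

x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
lengths = tf.constant([2, 3], dtype=tf.int64)
once = tf.reverse_sequence(x, lengths, seq_axis=1, batch_axis=0)
twice = tf.reverse_sequence(once, lengths, seq_axis=1, batch_axis=0)
# Reversing twice with the same seq_lengths is the identity map,
# which is exactly why _ReverseSequenceGrad can reuse the op.
assert bool(tf.reduce_all(twice == x))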


Example 2: _reverse_seq

def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.unknown_shape(ndims=input_seq[0].get_shape().ndims)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
Author: chemelnucfin | Project: tensorflow | Lines: 34 | Source: rnn.py
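
Note that array_ops.pack/unpack are the pre-1.0 names of today's tf.stack/tf.unstack, and math_ops.to_int64(x) corresponds to tf.cast(x, tf.int64). As a hedged sketch, the core of this helper rendered with current API names might read:

import tensorflow as tf

def reverse_seq(input_seq, lengths):
    """Sketch of the helper above with current names: reverse a list of
    (batch_size, depth) tensors along time, up to each row's true length."""
    if lengths is None:
        return list(reversed(input_seq))
    joined = tf.stack(input_seq)          # (time, batch_size, depth)
    lengths = tf.cast(lengths, tf.int64)  # the old to_int64 cast
    rev = tf.reverse_sequence(joined, lengths, seq_axis=0, batch_axis=1)
    return tf.unstack(rev)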


Example 3: _reverse_seq

def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply
               reverses the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  for input_ in input_seq:
    input_.set_shape(input_.get_shape().with_rank(2))

  # Join into (time, batch_size, depth)
  s_joined = array_ops_.pack(input_seq)

  # Reverse along dimension 0
  s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops_.unpack(s_reversed)
  return result
Author: kdavis-mozilla | Project: tensorflow | Lines: 26 | Source: models.py


Example 4: _reverse

def _reverse(input_, seq_lengths, seq_dim, batch_dim):
  if seq_lengths is not None:
    return array_ops.reverse_sequence(
        input=input_, seq_lengths=seq_lengths,
        seq_dim=seq_dim, batch_dim=batch_dim)
  else:
    return array_ops.reverse(input_, axis=[seq_dim])
Author: AlbertXiebnu | Project: tensorflow | Lines: 7 | Source: rnn.py


Example 5: testFloatReverseSequenceGrad

  def testFloatReverseSequenceGrad(self):
    x = np.asarray(
        [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
         [[17, 18, 19, 20], [21, 22, 23, 24]]],
        dtype=np.float64)  # np.float is an alias removed in NumPy 1.24+
    x = x.reshape(3, 2, 4, 1, 1)
    x = x.transpose([2, 1, 0, 3, 4])  # transpose axes 0 <=> 2

    # reverse dim 0 up to (0:3, none, 0:4) along dim=2
    seq_axis = 0
    batch_axis = 2
    seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)

    with self.cached_session():
      input_t = constant_op.constant(x, shape=x.shape)
      seq_lengths_t = constant_op.constant(seq_lengths, shape=seq_lengths.shape)
      reverse_sequence_out = array_ops.reverse_sequence(
          input_t,
          batch_axis=batch_axis,
          seq_axis=seq_axis,
          seq_lengths=seq_lengths_t)
      err = gradient_checker.compute_gradient_error(
          input_t, x.shape, reverse_sequence_out, x.shape, x_init_value=x)
    print("ReverseSequence gradient error = %g" % err)
    self.assertLess(err, 1e-8)
Author: HughKu | Project: tensorflow | Lines: 25 | Source: reverse_sequence_op_test.py


Example 6: _reverse

def _reverse(input_, seq_lengths, seq_axis, batch_axis):
  if seq_lengths is not None:
    return array_ops.reverse_sequence(
        input=input_,
        seq_lengths=seq_lengths,
        seq_axis=seq_axis,
        batch_axis=batch_axis)
  else:
    return array_ops.reverse(input_, axis=[seq_axis])
Author: kylin9872 | Project: tensorflow | Lines: 9 | Source: rnn.py


Example 7: _reverse

def _reverse(input_, seq_lengths, seq_dim, batch_dim):
  if seq_lengths is not None:
    return array_ops.reverse_sequence(
        input=input_, seq_lengths=seq_lengths,
        seq_dim=seq_dim, batch_dim=batch_dim)
  else:
    # See b/69305369.
    assert not use_tpu, (
        'Bidirectional with variable sequence lengths unsupported on TPU')
    return array_ops.reverse(input_, axis=[seq_dim])
Author: BhaskarNallani | Project: tensorflow | Lines: 10 | Source: functional_rnn.py


Example 8: testShapeFunctionEdgeCases

  def testShapeFunctionEdgeCases(self):
    t = array_ops.reverse_sequence(
        array_ops.placeholder(
            dtypes.float32, shape=None),
        seq_lengths=array_ops.placeholder(
            dtypes.int64, shape=(32,)),
        batch_axis=0,
        seq_axis=1)
    self.assertIs(t.get_shape().ndims, None)

    # Batch size mismatched between input and seq_lengths.
    with self.assertRaises(ValueError):
      array_ops.reverse_sequence(
          array_ops.placeholder(
              dtypes.float32, shape=(32, 2, 3)),
          seq_lengths=array_ops.placeholder(
              dtypes.int64, shape=(33,)),
          seq_axis=3)

    # seq_axis out of bounds.
    with self.assertRaisesRegexp(ValueError, "seq_dim must be < input rank"):
      array_ops.reverse_sequence(
          array_ops.placeholder(
              dtypes.float32, shape=(32, 2, 3)),
          seq_lengths=array_ops.placeholder(
              dtypes.int64, shape=(32,)),
          seq_axis=3)

    # batch_axis out of bounds.
    with self.assertRaisesRegexp(ValueError, "batch_dim must be < input rank"):
      array_ops.reverse_sequence(
          array_ops.placeholder(
              dtypes.float32, shape=(32, 2, 3)),
          seq_lengths=array_ops.placeholder(
              dtypes.int64, shape=(32,)),
          seq_axis=0,
          batch_axis=3)

    with self.cached_session():
      inputs = array_ops.placeholder(dtypes.float32, shape=(32, 2, 3))
      seq_lengths = array_ops.placeholder(dtypes.int64, shape=(32,))
      output = array_ops.reverse_sequence(
          inputs, seq_lengths=seq_lengths,
          seq_axis=0)  # batch_axis default is 0
      with self.assertRaisesOpError("batch_dim == seq_dim"):
        output.eval(feed_dict={
            inputs: np.random.rand(32, 2, 3),
            seq_lengths: xrange(32)
        })
Author: HughKu | Project: tensorflow | Lines: 49 | Source: reverse_sequence_op_test.py


Example 9: _reverse_seq

def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
               or nested tuples of tensors.
    lengths:   A tensor of dimension batch_size, containing lengths for each
               sequence in the batch. If "None" is specified, simply reverses
               the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_is_tuple = nest.is_sequence(input_seq[0])
  flat_input_seq = (nest.flatten(input_) if input_is_tuple else [input_]
                    for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.pack(sequence)

    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    if lengths is not None:
      lengths = math_ops.to_int64(lengths)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unpack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             if input_is_tuple else flat_result[0]
             for input_, flat_result in zip(input_seq, flat_results)]
  return results
Author: AntHar | Project: tensorflow | Lines: 47 | Source: rnn.py


Example 10: _testReverseSequence

def _testReverseSequence(self,
                         x,
                         batch_axis,
                         seq_axis,
                         seq_lengths,
                         truth,
                         use_gpu=False,
                         expected_err_re=None):
  with self.test_session(use_gpu=use_gpu):
    ans = array_ops.reverse_sequence(
        x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths)
    if expected_err_re is None:
      tf_ans = ans.eval()
      self.assertAllClose(tf_ans, truth, atol=1e-10)
      self.assertShapeEqual(truth, ans)
    else:
      with self.assertRaisesOpError(expected_err_re):
        ans.eval()
Author: HughKu | Project: tensorflow | Lines: 18 | Source: reverse_sequence_op_test.py


Example 11: _reverse

  def _reverse(self, t, lengths):
    """Time reverse the provided tensor or list of tensors.

    Assumes the top dimension is the time dimension.

    Args:
      t: 3D tensor or list of 2D tensors to be reversed
      lengths: 1D tensor of lengths, or None

    Returns:
      A reversed tensor or list of tensors
    """
    if isinstance(t, list):
      return list(reversed(t))
    else:
      if lengths is None:
        return array_ops.reverse(t, [True, False, False])
      else:
        return array_ops.reverse_sequence(t, lengths, 0, 1)
Author: MostafaGazar | Project: tensorflow | Lines: 19 | Source: fused_rnn_cell.py
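
array_ops.reverse(t, [True, False, False]) above is the legacy boolean-mask form of reverse; current TensorFlow takes axis indices instead. A modern equivalent of that call (an assumption based on today's tf.reverse signature, not code from the quoted project):

import tensorflow as tf

t = tf.reshape(tf.range(24.), (4, 3, 2))  # (time, batch, depth)
t_rev = tf.reverse(t, axis=[0])           # reverse the time dimension only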


Example 12: _testReverseSequence

def _testReverseSequence(self,
                         x,
                         batch_axis,
                         seq_axis,
                         seq_lengths,
                         truth,
                         expected_err_re=None):
  with self.cached_session():
    p = array_ops.placeholder(dtypes.as_dtype(x.dtype))
    lengths = array_ops.placeholder(dtypes.as_dtype(seq_lengths.dtype))
    with self.test_scope():
      ans = array_ops.reverse_sequence(
          p, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=lengths)
    if expected_err_re is None:
      tf_ans = ans.eval(feed_dict={p: x, lengths: seq_lengths})
      self.assertAllClose(tf_ans, truth, atol=1e-10)
    else:
      with self.assertRaisesOpError(expected_err_re):
        ans.eval(feed_dict={p: x, lengths: seq_lengths})
Author: AnishShah | Project: tensorflow | Lines: 19 | Source: reverse_sequence_op_test.py


Example 13: bidirectional_dynamic_rnn


#......... (part of this function's code is omitted here) .........
        `[max_time, batch_size, input_size]`.
      [batch_size, input_size].
    sequence_length: An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    initial_state_fw: (optional) An initial state for the forward RNN.
      This must be a tensor of appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
      If `cell_fw.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using
      the corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial states and expected output.
      Required if initial_states are not provided or RNN states have a
      heterogeneous dtype.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency
      and can be run in parallel, will be.  This parameter trades off
      time for space.  Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors.
      If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
      If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using `time_major = True` is a bit more efficient because it avoids
      transposes at the beginning and end of the RNN calculation.  However,
      most TensorFlow data is batch-major, so by default this function
      accepts input and emits output in batch-major form.
    dtype: (optional) The data type for the initial state.  Required if
      either of the initial states are not provided.
    scope: VariableScope for the created subgraph; defaults to
      "bidirectional_rnn"

  Returns:
    A tuple (outputs, output_states) where:
      outputs: A tuple (output_fw, output_bw) containing the forward and
        the backward rnn output `Tensor`.
        If time_major == False (default),
          output_fw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_bw.output_size]`.
        If time_major == True,
          output_fw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_bw.output_size]`.
        It returns a tuple instead of a single concatenated `Tensor`, unlike
        in the `bidirectional_rnn`. If the concatenated one is preferred,
        the forward and backward outputs can be concatenated as
        `tf.concat_v2(outputs, 2)`.
      output_states: A tuple (output_state_fw, output_state_bw) containing
        the forward and the backward final states of bidirectional rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
  """

  if not isinstance(cell_fw, rnn_cell.RNNCell):
    raise TypeError("cell_fw must be an instance of RNNCell")
  if not isinstance(cell_bw, rnn_cell.RNNCell):
    raise TypeError("cell_bw must be an instance of RNNCell")

  with vs.variable_scope(scope or "bidirectional_rnn"):
    # Forward direction
    with vs.variable_scope("fw") as fw_scope:
      output_fw, output_state_fw = dynamic_rnn(
          cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
          initial_state=initial_state_fw, dtype=dtype,
          parallel_iterations=parallel_iterations, swap_memory=swap_memory,
          time_major=time_major, scope=fw_scope)

    # Backward direction
    if not time_major:
      time_dim = 1
      batch_dim = 0
    else:
      time_dim = 0
      batch_dim = 1

    with vs.variable_scope("bw") as bw_scope:
      inputs_reverse = array_ops.reverse_sequence(
          input=inputs, seq_lengths=sequence_length,
          seq_dim=time_dim, batch_dim=batch_dim)
      tmp, output_state_bw = dynamic_rnn(
          cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
          initial_state=initial_state_bw, dtype=dtype,
          parallel_iterations=parallel_iterations, swap_memory=swap_memory,
          time_major=time_major, scope=bw_scope)

  output_bw = array_ops.reverse_sequence(
      input=tmp, seq_lengths=sequence_length,
      seq_dim=time_dim, batch_dim=batch_dim)

  outputs = (output_fw, output_bw)
  output_states = (output_state_fw, output_state_bw)

  return (outputs, output_states)
Author: tensorflow | Project: tensorflow | Lines: 101 | Source: rnn.py
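
The backward direction above follows a reverse -> run -> reverse pattern: inputs are time-reversed up to each sequence's true length, run through an ordinary forward RNN, and the outputs are reversed back so padded steps stay untouched at the end. A compact sketch of just that pattern (run_rnn is a hypothetical stand-in for the dynamic_rnn call):

import tensorflow as tf

def backward_pass(inputs, sequence_length, run_rnn, time_dim=0, batch_dim=1):
    # Reverse each sequence up to its true length; padding stays in place.
    rev = tf.reverse_sequence(inputs, sequence_length,
                              seq_axis=time_dim, batch_axis=batch_dim)
    out = run_rnn(rev)  # hypothetical: any time-major RNN applied to rev
    # Reverse the outputs back so out[t] lines up with inputs[t] again.
    return tf.reverse_sequence(out, sequence_length,
                               seq_axis=time_dim, batch_axis=batch_dim)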


Example 14: bidirectional_dynamic_rnn

def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
    """
    Creates a dynamic version of bidirectional recurrent neural network.
    The initial state for both directions is zero by default.
    :param cell_fw: An instance of RNNCell, to be used for forward direction
    :param cell_bw: An instance of RNNCell, to be used for backward direction
    :param inputs: The RNN inputs
    :param sequence_length: An int32/int64 vector
    :param initial_state_fw: An initial state for the forward RNN
    :param initial_state_bw: An initial state for the backward RNN
    :param dtype: The data type for the initial states and expected output
    :param parallel_iterations: The number of iterations in parallel
    :param swap_memory:
    :param time_major:
    :param scope:
    :return: A tuple (outputs, output_states)
    """
    if not isinstance(cell_fw, rnn_cell.RNNCell):
        raise TypeError("cell_fw must be an instance of RNNCell")
    if not isinstance(cell_bw, rnn_cell.RNNCell):
        raise TypeError("cell_bw must be an instance of RNNCell")

    with vs.variable_scope(scope or "bidirectional_rnn"):
        # Forward direction
        with vs.variable_scope("fw") as fw_scope:
            output_fw, output_state_fw = dynamic_rnn(
                cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
                initial_state=initial_state_fw, dtype=dtype,
                parallel_iterations=parallel_iterations,
                swap_memory=swap_memory, time_major=time_major, scope=fw_scope)

        # Backward direction
        if not time_major:
            time_dim = 1
            batch_dim = 0
        else:
            time_dim = 0
            batch_dim = 1

        with vs.variable_scope("bw") as bw_scope:
            inputs_reverse = array_ops.reverse_sequence(
                input=inputs, seq_lengths=sequence_length,
                seq_dim=time_dim, batch_dim=batch_dim)
            tmp_output_bw, tmp_output_state_bw = dynamic_rnn(
                cell=cell_bw, inputs=inputs_reverse,
                sequence_length=sequence_length,
                initial_state=initial_state_bw, dtype=dtype,
                parallel_iterations=parallel_iterations,
                swap_memory=swap_memory, time_major=time_major,
                scope=bw_scope)

    output_bw = array_ops.reverse_sequence(
        input=tmp_output_bw, seq_lengths=sequence_length,
        seq_dim=time_dim, batch_dim=batch_dim)

    if Config.cell_type == "LSTMCell":
        tmp_output_state_bw_ = tmp_output_state_bw.c
    else:
        tmp_output_state_bw_ = tmp_output_state_bw

    output_state_bw = array_ops.reverse_sequence(
        input=tmp_output_state_bw_, seq_lengths=sequence_length,
        seq_dim=time_dim, batch_dim=batch_dim)

    outputs = (output_fw, output_bw)

    if Config.cell_type == "LSTMCell":
        output_states = (output_state_fw.c, output_state_bw)
    else:
        output_states = (output_state_fw, output_state_bw)

    return (outputs, output_states)
Author: minhitbk | Project: data-science | Lines: 75 | Source: mod_rnn.py



Note: The tensorflow.python.ops.array_ops.reverse_sequence examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's License. Please do not repost without permission.

