
Python rnn.dynamic_rnn Function Code Examples


This article collects typical usage examples of the tensorflow.python.ops.rnn.dynamic_rnn function in Python. If you are wondering what exactly dynamic_rnn does, how to call it, or what working code that uses it looks like, the hand-picked examples below should help.



Twenty dynamic_rnn code examples are shown below, sorted by popularity. They come from projects targeting different TensorFlow releases: some use pre-1.0 APIs such as tf.pack, tf.unpack, and the old tf.concat(dim, values) / tf.split(dim, num, value) argument order, while newer ones use the TF 1.x equivalents (tf.stack, tf.unstack, tf.concat(values, axis)).
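
Before the individual examples, here is a minimal, self-contained sketch of the basic call pattern, written against a recent TF 1.x release; the placeholder shapes, cell size, and feed values are illustrative assumptions, not taken from any example below.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn

# Batch-major inputs: [batch_size, max_time, input_depth] (time_major=False).
inputs = tf.placeholder(tf.float32, shape=(None, 10, 8))
# True length of each sequence; output steps past the length are zeros.
lengths = tf.placeholder(tf.int32, shape=(None,))

cell = tf.nn.rnn_cell.GRUCell(16)
# outputs: [batch_size, max_time, 16]; state: [batch_size, 16] (final state
# at each sequence's true length).
outputs, state = rnn.dynamic_rnn(cell, inputs, sequence_length=lengths,
                                 dtype=tf.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out_v, state_v = sess.run([outputs, state],
                              feed_dict={inputs: np.zeros((2, 10, 8), np.float32),
                                         lengths: [10, 7]})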

Example 1: _composition_function

 def _composition_function(self, inputs, length, init_state=None):
     if self._composition == "GRU":
         cell = GRUCell(self._size)
         return dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
     elif self._composition == "LSTM":
         cell = BasicLSTMCell(self._size)
         init_state = tf.concat(1, [tf.zeros_like(init_state, tf.float32), init_state]) if init_state is not None else None
         outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                            initial_state=init_state, dtype=tf.float32)[0]
         return outs
     elif self._composition == "BiGRU":
         cell = GRUCell(self._size // 2, self._size)
         init_state_fw, init_state_bw = tf.split(1, 2, init_state) if init_state is not None else (None, None)
         with tf.variable_scope("forward"):
             fw_outs = dynamic_rnn(cell, inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_fw, dtype=tf.float32)[0]
         with tf.variable_scope("backward"):
             rev_inputs = tf.reverse_sequence(tf.pack(inputs), length, 0, 1)
             rev_inputs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), rev_inputs)]
             bw_outs = dynamic_rnn(cell, rev_inputs, sequence_length=length, time_major=True,
                                   initial_state=init_state_bw, dtype=tf.float32)[0]
             bw_outs = tf.reverse_sequence(tf.pack(bw_outs), length, 0, 1)
             bw_outs = [tf.reshape(x, [-1, self._size]) for x in tf.split(0, len(inputs), bw_outs)]
         return [tf.concat(1, [fw_out, bw_out]) for fw_out, bw_out in zip(fw_outs, bw_outs)]
     else:
         raise NotImplementedError("Other compositions not implemented yet.")
Author: MorLong, Project: qa_network, Lines: 27, Source file: qa_network.py


Example 2: setup_target_encoder

    def setup_target_encoder(self):
        """
        This sets up an encoder that works on the
        target sentence and produces a single label at the end.
        The encoder has attention.

        Returns
        -------
        """
        if self.num_layers > 1:
            self.tgt_encoder_cell = rnn_cell.GRUCell(self.size, input_size=self.embedding[1])
        self.attn_cell = GRUCellAttn(self.size, self.encoder_output, scope="EncoderAttnCell")

        out = self.decoder_inputs

        with vs.variable_scope("TgtEncoder"):
            inp = self.decoder_inputs
            for i in xrange(self.num_layers - 1):
                with vs.variable_scope("TgtEncoderCell%d" % i) as scope:
                    out, state_output = rnn.dynamic_rnn(self.tgt_encoder_cell, self.dropout(inp), time_major=False,
                                                        dtype=dtypes.float32, sequence_length=self.tgt_steps,
                                                        scope=scope, initial_state=self.tgt_encoder_state_output[i])
                    inp = out
                    self.tgt_encoder_state_output.append(state_output)

            with vs.variable_scope("TgtEncoderAttnCell") as scope:
                out, state_output = rnn.dynamic_rnn(self.attn_cell, self.dropout(inp), time_major=False,
                                                    dtype=dtypes.float32, sequence_length=self.tgt_steps,
                                                    scope=scope, initial_state=self.tgt_encoder_state_output[i + 1])
                self.tgt_encoder_output = out
                self.tgt_encoder_state_output.append(state_output)
Author: windweller, Project: Trident, Lines: 31, Source file: story_model.py


Example 3: testBatchSizeFromInput

 def testBatchSizeFromInput(self):
   cell = Plus1RNNCell()
   # With static batch size
   inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
   # - Without initial_state
   outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
   self.assertEqual(3, outputs.shape[0].value)
   self.assertEqual(3, state.shape[0].value)
   # - With initial_state
   outputs, state = rnn.dynamic_rnn(
       cell,
       inputs,
       initial_state=array_ops.placeholder(dtypes.float32, shape=(3, 5)))
   self.assertEqual(3, outputs.shape[0].value)
   self.assertEqual(3, state.shape[0].value)
   # Without static batch size
   inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
   # - Without initial_state
   outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
   self.assertEqual(None, outputs.shape[0].value)
   self.assertEqual(None, state.shape[0].value)
   # - With initial_state
   outputs, state = rnn.dynamic_rnn(
       cell,
       inputs,
       initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
   self.assertEqual(None, outputs.shape[0].value)
   self.assertEqual(None, state.shape[0].value)
Author: 1000sprites, Project: tensorflow, Lines: 28, Source file: rnn_test.py


Example 4: setup_decoder

    def setup_decoder(self):
        """
        This sets up a decoder

        but we may need a double-encoder

        Returns
        -------
        """
        if self.num_layers > 1:
            self.decoder_cell = rnn_cell.GRUCell(self.size, input_size=self.embedding[1])
        self.attn_cell = GRUCellAttn(self.size, self.encoder_output, scope="DecoderAttnCell")

        out = self.decoder_inputs

        with vs.variable_scope("Decoder"):
            inp = self.decoder_inputs
            for i in xrange(self.num_layers - 1):
                with vs.variable_scope("DecoderCell%d" % i) as scope:
                    out, state_output = rnn.dynamic_rnn(self.decoder_cell, self.dropout(inp), time_major=False,
                                                        dtype=dtypes.float32, sequence_length=self.tgt_steps,
                                                        scope=scope, initial_state=self.decoder_state_input[i])
                    inp = out
                    self.decoder_state_output.append(state_output)

            with vs.variable_scope("DecoderAttnCell") as scope:
                out, state_output = rnn.dynamic_rnn(self.attn_cell, self.dropout(inp), time_major=False,
                                                    dtype=dtypes.float32, sequence_length=self.tgt_steps,
                                                    scope=scope, initial_state=self.decoder_state_input[i + 1])
                self.decoder_output = out
                self.decoder_state_output.append(state_output)
Author: windweller, Project: Trident, Lines: 31, Source file: story_model.py


Example 5: sentence_embedding_rnn

def sentence_embedding_rnn(_encoder_inputs, vocab_size, cell, 
	embedding_size, mask=None, dtype=dtypes.float32, scope=None, reuse_scop=None):
	"""
	
	"""
	with variable_scope.variable_scope("embedding_rnn", reuse=reuse_scop):
		# encoder_cell = rnn_cell.EmbeddingWrapper(
		# 		cell, embedding_classes=vocab_size,
		# 		embedding_size=embedding_size)
		# Divde encoder_inputs by given input_mask
		if mask != None:
			encoder_inputs = [[] for _ in mask]
			_mask = 0
			for num in range(len(_encoder_inputs)):
				encoder_inputs[_mask].append(_encoder_inputs[num])
				if num == mask[_mask]:
					_mask += 1
		else:
			encoder_inputs = []
			encoder_inputs.append(_encoder_inputs)
		encoder_state = None	 
		encoder_states = []
		for encoder_input in encoder_inputs:
			if encoder_state == []:
				_, encoder_state = rnn.dynamic_rnn(encoder_cell, encoder_input, dtype=dtype)
			else:
				_, encoder_state = rnn.dynamic_rnn(encoder_cell, encoder_input, encoder_state, dtype=dtype)
			encoder_states.append(encoder_state)
		return encoder_states
Author: sufengniu, Project: DMN-tensorflow, Lines: 29, Source file: seq2seq.py


Example 6: testBlockGRUToGRUCellMultiStep

  def testBlockGRUToGRUCellMultiStep(self):
    with self.session(use_gpu=True, graph=ops.Graph()) as sess:
      batch_size = 2
      cell_size = 3
      input_size = 3
      time_steps = 4

      # Random initializers.
      seed = 1994
      initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
      np.random.seed(seed)

      # Inputs
      concat_x = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      h = array_ops.zeros([batch_size, cell_size])

      # Values for the inputs.
      x_values = np.random.rand(time_steps, batch_size, input_size)
      h_value = np.random.rand(batch_size, cell_size)

      # Output from the block GRU cell implementation.
      with vs.variable_scope("block", initializer=initializer):
        cell = gru_ops.GRUBlockCell(cell_size)
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        feeds = {concat_x: x_values, h: h_value}
        sess.run([variables.global_variables_initializer()])
        block_res = sess.run([outputs_dynamic, state_dynamic], feeds)

      # Output from the basic GRU cell implementation.
      with vs.variable_scope("basic", initializer=initializer):
        cell = rnn_cell.GRUCell(cell_size)
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        feeds = {concat_x: x_values, h: h_value}
        sess.run([variables.global_variables_initializer()])
        basic_res = sess.run([outputs_dynamic, state_dynamic], feeds)

      # Check the lengths of the outputs_dynamic, and states.
      self.assertEqual(len(block_res), len(basic_res))
      self.assertEqual(len(block_res[0]), len(basic_res[0]))
      self.assertEqual(len(block_res[1]), len(basic_res[1]))

      # Check the outputs_dynamic values.
      for block_output, basic_output in zip(block_res[0], basic_res[0]):
        self.assertAllClose(block_output, basic_output)

      # Check the state_dynamic value.
      self.assertAllClose(block_res[1], basic_res[1])
Author: Ajaycs99, Project: tensorflow, Lines: 58, Source file: gru_ops_test.py


Example 7: testInvalidSequenceLengthShape

 def testInvalidSequenceLengthShape(self):
   cell = Plus1RNNCell()
   inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
   with self.assertRaisesRegexp(ValueError, "must be a vector"):
     rnn.dynamic_rnn(
         cell,
         array_ops.stack(inputs),
         dtype=dtypes.float32,
         sequence_length=[[4]])
Author: Immexxx, Project: tensorflow, Lines: 9, Source file: rnn_test.py


Example 8: inference_gru_block_vs_gru_cell

def inference_gru_block_vs_gru_cell(batch_size,
                                    cell_size,
                                    input_size,
                                    time_steps,
                                    use_gpu=False,
                                    iters=30):
  """Benchmark inference speed between GRUBlockCell vs GRUCell."""
  ops.reset_default_graph()
  with session.Session(graph=ops.Graph()) as sess:
    with benchmarking.device(use_gpu):

      # Random initializers.
      seed = 1994
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
      np.random.seed(seed)

      # Inputs
      concat_x = vs.get_variable("concat_x",
                                 [time_steps, batch_size, input_size])
      h = vs.get_variable("h", [batch_size, cell_size])

      # Output from the basic GRU cell implementation.
      with vs.variable_scope("basic", initializer=initializer):
        cell = rnn_cell.GRUCell(cell_size)
        outputs_dynamic, _ = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        basic_time_inference = benchmarking.seconds_per_run(
            outputs_dynamic, sess, iters)

      # Output from the block GRU cell implementation.
      with vs.variable_scope("block", initializer=initializer):
        cell = gru_ops.GRUBlockCell(cell_size)
        outputs_dynamic, _ = rnn.dynamic_rnn(
            cell,
            inputs=concat_x,
            initial_state=h,
            time_major=True,
            dtype=dtypes.float32)
        sess.run([variables.global_variables_initializer()])
        block_time_inference = benchmarking.seconds_per_run(
            outputs_dynamic, sess, iters)

    performance_inference = (basic_time_inference - block_time_inference
                            ) * 100 / basic_time_inference
    print(",".join([
        str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
            use_gpu), str(basic_time_inference), str(block_time_inference), str(
                performance_inference)
    ]))

    return basic_time_inference, block_time_inference
Author: Ajaycs99, Project: tensorflow, Lines: 56, Source file: gru_ops_test.py


Example 9: crf_decode

def crf_decode(potentials, transition_params, sequence_length):
  """Decode the highest scoring sequence of tags in TensorFlow.

  This is a function for tensors.

  Args:
    potentials: A [batch_size, max_seq_len, num_tags] tensor of
              unary potentials.
    transition_params: A [num_tags, num_tags] matrix of
              binary potentials.
    sequence_length: A [batch_size] vector of true sequence lengths.

  Returns:
    decode_tags: A [batch_size, max_seq_len] matrix, with dtype `tf.int32`.
                Contains the highest scoring tag indices.
    best_score: A [batch_size] vector, containing the score of `decode_tags`.
  """
  # For simplicity, in shape comments, denote:
  # 'batch_size' by 'B', 'max_seq_len' by 'T' , 'num_tags' by 'O' (output).
  num_tags = potentials.get_shape()[2].value

  # Computes forward decoding. Get last score and backpointers.
  crf_fwd_cell = CrfDecodeForwardRnnCell(transition_params)
  initial_state = array_ops.slice(potentials, [0, 0, 0], [-1, 1, -1])
  initial_state = array_ops.squeeze(initial_state, axis=[1])      # [B, O]
  inputs = array_ops.slice(potentials, [0, 1, 0], [-1, -1, -1])   # [B, T-1, O]
  backpointers, last_score = rnn.dynamic_rnn(
      crf_fwd_cell,
      inputs=inputs,
      sequence_length=sequence_length - 1,
      initial_state=initial_state,
      time_major=False,
      dtype=dtypes.int32)             # [B, T - 1, O], [B, O]
  backpointers = gen_array_ops.reverse_sequence(
      backpointers, sequence_length - 1, seq_dim=1)               # [B, T-1, O]

  # Computes backward decoding. Extract tag indices from backpointers.
  crf_bwd_cell = CrfDecodeBackwardRnnCell(num_tags)
  initial_state = math_ops.cast(math_ops.argmax(last_score, axis=1),
                                dtype=dtypes.int32)               # [B]
  initial_state = array_ops.expand_dims(initial_state, axis=-1)   # [B, 1]
  decode_tags, _ = rnn.dynamic_rnn(
      crf_bwd_cell,
      inputs=backpointers,
      sequence_length=sequence_length - 1,
      initial_state=initial_state,
      time_major=False,
      dtype=dtypes.int32)           # [B, T - 1, 1]
  decode_tags = array_ops.squeeze(decode_tags, axis=[2])           # [B, T - 1]
  decode_tags = array_ops.concat([initial_state, decode_tags], axis=1)  # [B, T]
  decode_tags = gen_array_ops.reverse_sequence(
      decode_tags, sequence_length, seq_dim=1)                     # [B, T]

  best_score = math_ops.reduce_max(last_score, axis=1)             # [B]
  return decode_tags, best_score
Author: SylChan, Project: tensorflow, Lines: 55, Source file: crf.py
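
For context, this function is exposed publicly as tf.contrib.crf.crf_decode in recent TF 1.x releases. A hedged usage sketch follows; the shapes and random inputs are made-up illustrations, not code from the project above.

import tensorflow as tf

batch_size, max_seq_len, num_tags = 2, 5, 4
# Unary potentials [B, T, O], transition matrix [O, O], true lengths [B].
potentials = tf.random_uniform([batch_size, max_seq_len, num_tags])
transition_params = tf.random_uniform([num_tags, num_tags])
sequence_length = tf.constant([5, 3])

# decode_tags: [B, T] int32 tag indices; best_score: [B] path scores.
decode_tags, best_score = tf.contrib.crf.crf_decode(
    potentials, transition_params, sequence_length)

with tf.Session() as sess:
    tags_v, score_v = sess.run([decode_tags, best_score])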


Example 10: testInvalidSequenceLengthShape

 def testInvalidSequenceLengthShape(self):
   cell = Plus1RNNCell()
   if context.in_graph_mode():
     inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
   else:
     inputs = [constant_op.constant(np.ones((3, 4)))]
   with self.assertRaisesRegexp(ValueError, "must be a vector"):
     rnn.dynamic_rnn(
         cell,
         array_ops.stack(inputs),
         dtype=dtypes.float32,
         sequence_length=[[4]])
Author: DjangoPeng, Project: tensorflow, Lines: 12, Source file: rnn_test.py


Example 11: RNN

def RNN(inputs, lens, name, reuse):
    print ("Building network " + name)
    # Define weights
    inputs = tf.gather(one_hots, inputs)
    weights = tf.Variable(tf.random_normal([__n_hidden, n_output]), name=name+"_weights")
    biases = tf.Variable(tf.random_normal([n_output]), name=name+"_biases")

    # Define an LSTM cell with TensorFlow

    enc_outputs, enc_states = rnn.dynamic_rnn(
        __cell_kind(__n_hidden),
        inputs,
        sequence_length=lens,
        dtype=tf.float32,
        scope=name,
        time_major=False)

    dec_outputs, dec_states = rnn.dynamic_rnn(
        __cell_kind(__n_hidden),
        enc_outputs,
        sequence_length=lens,
        dtype=tf.float32,
        scope=name,
        time_major=False)

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (__batch_size, __n_steps, n_input)
    # Required shape: '__n_steps' tensors list of shape (__batch_size, n_input)

    '''dec_outputs, dec_states = rnn.rnn(
        __cell_kind(__n_hidden),
        tf.unpack(tf.transpose(inputs, [1, 0, 2])),
        sequence_length=lens,
        dtype=tf.float32,
        scope=name)
    outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])'''
    print ("Done building network " + name)

    # Asserts are actually documentation: they can't be out of date
    assert dec_outputs.get_shape() == (__batch_size, __n_steps, __n_hidden)
    # Linear activation, using rnn output for each char
    # Reshaping here for a `batch` matrix multiply
    # It's faster than `batch_matmul` probably because it can guarantee a
    # static shape
    outputs = tf.reshape(dec_outputs, [__batch_size * __n_steps, __n_hidden])
    finals = tf.matmul(outputs, weights)
    finals = tf.reshape(finals, [__batch_size, __n_steps, n_output]) + biases
    return finals[:, :__n_steps-1, :]
Author: SeanTater, Project: albemarle, Lines: 48, Source file: 12-seq2seq.py


Example 12: testRNNWithKerasGRUCell

  def testRNNWithKerasGRUCell(self):
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.GRUCell(output_shape)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))

      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)

      sess.run([variables_lib.global_variables_initializer()])
      _, outputs, state = sess.run(
          [train_op, outputs, state], {inputs: x_train, predict: y_train})

      self.assertEqual(len(outputs), batch)
      self.assertEqual(len(state), batch)
Author: gunan, Project: tensorflow, Lines: 32, Source file: rnn_test.py


Example 13: ndlstm_base_dynamic

def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs
Author: AlbertXiebnu, Project: tensorflow, Lines: 31, Source file: lstm1d.py


Example 14: __call__

  def __call__(self,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
    is_list = isinstance(inputs, list)
    if self._use_dynamic_rnn:
      if is_list:
        inputs = array_ops.pack(inputs)
      outputs, state = rnn.dynamic_rnn(
          self._cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=initial_state,
          dtype=dtype,
          time_major=True,
          scope=scope)
      if is_list:
        # Convert outputs back to list
        outputs = array_ops.unpack(outputs)
    else:  # non-dynamic rnn
      if not is_list:
        inputs = array_ops.unpack(inputs)
      outputs, state = rnn.rnn(self._cell,
                               inputs,
                               initial_state=initial_state,
                               dtype=dtype,
                               sequence_length=sequence_length,
                               scope=scope)
      if not is_list:
        # Convert outputs back to tensor
        outputs = array_ops.pack(outputs)

    return outputs, state
Author: MostafaGazar, Project: tensorflow, Lines: 35, Source file: fused_rnn_cell.py


Example 15: testTensorArrayStateIsAccepted

  def testTensorArrayStateIsAccepted(self):
    cell = TensorArrayStateRNNCell()
    in_graph_mode = context.in_graph_mode()

    if in_graph_mode:
      inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
    else:
      inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)

    with self.test_session() as sess:
      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=[4])
      state = (state[0], state[1].stack())
      if in_graph_mode:
        outputs, state = sess.run(
            [outputs, state], feed_dict={
                inputs: [[[1], [2], [3], [4]]]
            })

    if in_graph_mode:
      self.assertAllEqual(outputs, np.array([[[1], [2], [3], [4]]]))
      self.assertEqual(state[0], 4)
      self.assertAllEqual(state[1], np.array([[[1]], [[2]], [[3]], [[4]]]))
    else:
      self.assertAllEqual(outputs.numpy(), np.array([[[1], [2], [3], [4]]]))
      self.assertEqual(state[0].numpy(), 4)
      self.assertAllEqual(state[1].numpy(),
                          np.array([[[1]], [[2]], [[3]], [[4]]]))
Author: ChengYuXiang, Project: tensorflow, Lines: 28, Source file: rnn_test.py


Example 16: testCustomizedAttention

  def testCustomizedAttention(self):
    batch_size = 2
    max_time = 3
    num_units = 2
    memory = constant_op.constant([[[1., 1.], [2., 2.], [3., 3.]],
                                   [[4., 4.], [5., 5.], [6., 6.]]])
    memory_sequence_length = constant_op.constant([3, 2])
    attention_mechanism = wrapper.BahdanauAttention(num_units, memory,
                                                    memory_sequence_length)

    # Sets all returned values to be all ones.
    def _customized_attention(unused_attention_mechanism, unused_cell_output,
                              unused_attention_state, unused_attention_layer):
      """Customized attention.

      Returns:
        attention: `Tensor` of shape [batch_size, num_units], attention output.
        alignments: `Tensor` of shape [batch_size, max_time], sigma value for
          each input memory (prob. function of input keys).
        next_attention_state: A `Tensor` representing the next state for the
          attention.
      """
      attention = array_ops.ones([batch_size, num_units])
      alignments = array_ops.ones([batch_size, max_time])
      next_attention_state = alignments
      return attention, alignments, next_attention_state

    attention_cell = wrapper.AttentionWrapper(
        rnn_cell.LSTMCell(2),
        attention_mechanism,
        attention_layer_size=None,  # don't use attention layer.
        output_attention=False,
        alignment_history=(),
        attention_fn=_customized_attention,
        name='attention')
    self.assertEqual(num_units, attention_cell.output_size)

    initial_state = attention_cell.zero_state(
        batch_size=2, dtype=dtypes.float32)
    source_input_emb = array_ops.ones([2, 3, 2])
    source_input_length = constant_op.constant([3, 2])

    # 'state' is a tuple of
    # (cell_state, h, attention, alignments, alignment_history, attention_state)
    output, state = rnn.dynamic_rnn(
        attention_cell,
        inputs=source_input_emb,
        sequence_length=source_input_length,
        initial_state=initial_state,
        dtype=dtypes.float32)

    with self.session() as sess:
      sess.run(variables.global_variables_initializer())
      output_value, state_value = sess.run([output, state], feed_dict={})
      self.assertAllEqual(np.array([2, 3, 2]), output_value.shape)
      self.assertAllClose(np.array([[1., 1.], [1., 1.]]), state_value.attention)
      self.assertAllClose(
          np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.alignments)
      self.assertAllClose(
          np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.attention_state)
Author: Wajih-O, Project: tensorflow, Lines: 60, Source file: attention_wrapper_test.py


Example 17: bidirectional_rnn

  def bidirectional_rnn(self, cell, inputs, lengths, scope=None):
    name = scope.name or "BiRNN"
    # Forward direction
    with vs.variable_scope(name + "_FW") as fw_scope:
      output_fw, output_state_fw = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtypes.float32,
                                                   sequence_length=lengths, scope=fw_scope)
    # Backward direction
    with vs.variable_scope(name + "_BW") as bw_scope:
      output_bw, output_state_bw = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtypes.float32,
                                                   sequence_length=lengths, scope=bw_scope)
    output_bw = tf.reverse_sequence(output_bw, tf.to_int64(lengths), seq_dim=0, batch_dim=1)

    outputs = output_fw + output_bw
    output_state = output_state_fw + output_state_bw

    return (outputs, output_state)
Author: nipengmath, Project: nlc, Lines: 16, Source file: nlc_model.py
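
The hand-rolled forward/backward pair above predates the library helper; in TF 1.x the same wiring is available as tf.nn.bidirectional_dynamic_rnn. A minimal sketch, batch-major rather than time-major, with illustrative sizes:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 20, 8])   # [batch, time, depth]
lengths = tf.placeholder(tf.int32, [None])

cell_fw = tf.nn.rnn_cell.GRUCell(16)
cell_bw = tf.nn.rnn_cell.GRUCell(16)
# The helper reverses the inputs (respecting lengths), runs cell_bw, and
# reverses the result back -- the reverse_sequence dance done by hand above.
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
    cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=tf.float32)
outputs = tf.concat([out_fw, out_bw], axis=-1)       # [batch, time, 32]

Note that Example 17 sums the two directions instead of concatenating them; both conventions appear in practice.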


Example 18: RNN

def RNN(inputs, lens, name, reuse):
    print ("Building network " + name)
    # Define weights
    weights = tf.Variable(tf.random_normal([__n_hidden, n_output]), name=name+"_weights")
    biases = tf.Variable(tf.random_normal([n_output]), name=name+"_biases")

    # Define an LSTM cell with TensorFlow
    outputs, states = rnn.dynamic_rnn(
        __cell_kind(__n_hidden),
        inputs,
        sequence_length=lens,
        dtype=tf.float32,
        scope=name,
        time_major=False)
    assert outputs.get_shape() == (__batch_size, __n_steps, __n_hidden)
    print ("Done building network " + name)

    #
    # All these asserts are actually documentation: they can't be out of date
    #

    outputs = tf.expand_dims(outputs, 2)
    assert outputs.get_shape() == (__batch_size, __n_steps, 1, __n_hidden)

    tiled_weights = tf.tile(tf.expand_dims(tf.expand_dims(weights, 0), 0), [__batch_size, __n_steps, 1, 1])
    assert tiled_weights.get_shape() == (__batch_size, __n_steps, __n_hidden, n_output)
    #assert tiled_weights.get_shape() == (1, 1, __n_hidden, n_output)
    # Linear activation, using rnn inner loop output for each char
    finals = tf.batch_matmul(outputs, tiled_weights) + biases
    assert finals.get_shape() == (__batch_size, __n_steps, 1, n_output)
    return tf.squeeze(finals)
Author: SeanTater, Project: albemarle, Lines: 31, Source file: 09-lstm-tensorflow-char-pat.py


Example 19: testKerasAndTFRNNLayerOutputComparison

  def testKerasAndTFRNNLayerOutputComparison(self):
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 20
    (x_train, _), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
    fix_weights_generator.build((None, input_shape))
    weights = fix_weights_generator.get_weights()

    with self.session(graph=ops_lib.Graph()) as sess:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      cell = keras.layers.SimpleRNNCell(output_shape)
      tf_out, tf_state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      cell.set_weights(weights)
      [tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
    with self.session(graph=ops_lib.Graph()) as sess:
      k_input = keras.Input(shape=(timestep, input_shape),
                            dtype=dtypes.float32)
      cell = keras.layers.SimpleRNNCell(output_shape)
      layer = keras.layers.RNN(cell, return_sequences=True, return_state=True)
      keras_out = layer(k_input)
      cell.set_weights(weights)
      k_out, k_state = sess.run(keras_out, {k_input: x_train})
    self.assertAllClose(tf_out, k_out)
    self.assertAllClose(tf_state, k_state)
Author: gunan, Project: tensorflow, Lines: 32, Source file: rnn_test.py


Example 20: crf_log_norm

def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm
Author: AlbertXiebnu, Project: tensorflow, Lines: 28, Source file: crf.py
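
In practice crf_log_norm is usually consumed indirectly through tf.contrib.crf.crf_log_likelihood, which subtracts the score of a gold tag sequence from this normalizer to give a training objective. A hedged sketch with illustrative shapes (TF 1.x):

import tensorflow as tf

unary_scores = tf.random_uniform([2, 5, 4])      # [batch, max_len, num_tags]
gold_tags = tf.zeros([2, 5], dtype=tf.int32)     # gold tag indices [batch, max_len]
lengths = tf.constant([5, 3])                    # true sequence lengths [batch]

log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
    unary_scores, gold_tags, lengths)
loss = tf.reduce_mean(-log_likelihood)           # minimize negative log-likelihood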



Note: The tensorflow.python.ops.rnn.dynamic_rnn examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects, and copyright remains with the original authors; distribution and use are governed by each project's license. Do not repost without permission.

