
Python tensorflow.scan Function Code Examples


This article collects typical usage examples of the tensorflow.scan function in Python. If you have been wondering what exactly scan does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the scan function, sorted by popularity by default.
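Before diving into the collected examples, here is a minimal sketch of scan's contract (my own illustration, assuming a TF 1.x graph-mode setup to match the examples below): tf.scan applies fn(accumulator, element) along dimension 0 of elems and stacks every intermediate accumulator into the result; without an explicit initializer, the first element seeds the accumulator.

    import tensorflow as tf

    elems = tf.constant([1.0, 2.0, 3.0, 4.0])
    # Running sum: each step receives the previous accumulator and one element.
    running_sum = tf.scan(lambda acc, x: acc + x, elems)

    with tf.Session() as sess:
        print(sess.run(running_sum))  # [ 1.  3.  6. 10.]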

Example 1: testScan_MultiOutputMismatchedInitializer

 def testScan_MultiOutputMismatchedInitializer(self):
     with self.test_session():
         elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
         initializer = np.array(1.0)
         # fn emits two outputs, but the initializer supplies only one structure element
         with self.assertRaisesRegexp(ValueError, "two structures don't have the same number of elements"):
             tf.scan(lambda a, x: (a, -a), elems, initializer)
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py
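For contrast, a hedged sketch (my addition, not part of the original test) of a multi-output scan whose initializer structure matches fn's two outputs, so no ValueError is raised:

    import numpy as np
    import tensorflow as tf

    elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    # Two accumulators, matching the two outputs returned by fn.
    initializer = (np.float64(1.0), np.float64(1.0))
    r = tf.scan(lambda a, x: (a[0], -a[0]), elems, initializer)

    with tf.Session() as sess:
        first, second = sess.run(r)  # first stays 1.0 at every step; second is -1.0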


Example 2: train

 def train(x=x, size_bt=size_bt, BV_t=BV_t, BH_t=BH_t):
     bv_init = tf.zeros([1, n_visible], tf.float32)
     bh_init = tf.zeros([1, n_hidden], tf.float32)
     u_t  = tf.scan(rnn_recurrence, x, initializer=u0)
     BV_t = tf.reshape(tf.scan(visible_bias_recurrence, u_t, bv_init), [size_bt, n_visible])
     BH_t = tf.reshape(tf.scan(hidden_bias_recurrence, u_t, bh_init), [size_bt, n_hidden])
     sample, cost = RBM.build_rbm(x, W, BV_t, BH_t, k=15)
     return x, sample, cost, params, size_bt            
Author: atriedman, Project: Musical_Matrices, Lines: 8, Source: rnn_rbm.py


Example 3: testScan_Simple

    def testScan_Simple(self):
        with self.test_session():
            elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
            v = tf.constant(2.0, name="v")

            r = tf.scan(lambda a, x: tf.mul(a, x), elems)
            self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r.eval())

            r = tf.scan(lambda a, x: tf.mul(a, x), elems, initializer=v)
            self.assertAllEqual([2.0, 4.0, 12.0, 48.0, 240.0, 1440.0], r.eval())
Author: gavinsherry, Project: tensorflow, Lines: 10, Source: functional_ops_test.py


Example 4: testScanVaryingShape

    def testScanVaryingShape(self):
        with self.test_session() as sess:
            x = tf.placeholder(dtype=tf.float32, shape=[None, 2])
            x_t = tf.transpose(x)
            # scan over dimension 0 (with shape None)
            result = tf.scan(lambda a, x: a + x, x)
            # scanned over transposed dimension 0 (with shape 2)
            result_t = tf.scan(lambda a, x: a + x, x_t, infer_shape=False)
            # ensure gradients can be calculated
            result_grad = tf.gradients(result, [x])[0]
            result_t_grad = tf.gradients(result_t, [x_t])[0]

            # smoke test to ensure they all evaluate
            sess.run([result, result_t, result_grad, result_t_grad], feed_dict={x: [[1.0, 2.0]]})
Author: botonchou, Project: tensorflow, Lines: 14, Source: functional_ops_test.py


Example 5: feature

    def feature(self, input_x, name = ''):
        if len(input_x.get_shape()) == 2:
            # in case input_x is batch_size x seq_length [token ids]
            input_x = tf.nn.embedding_lookup(self.embbeding_mat, input_x)
        # input_x:  batch_size x seq_length x g_emb_dim
        pooled_outputs = []
        index = -1
        embedded_chars = tf.scan(lambda a, x: tf.matmul(x, self.W), input_x)
        embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
        for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
            index += 1
            with tf.name_scope("conv-maxpool-%s-midterm" % filter_size):
                # Convolution Layer
                conv = tf.nn.conv2d(
                    embedded_chars_expanded,
                    self.W_conv[index],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, self.b_conv[index]), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = sum(self.num_filters)
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
        return h_pool_flat
Author: IshJ, Project: Texygen, Lines: 35, Source: TextganDiscriminator.py


Example 6: testScan_MultiInputSingleOutput

 def testScan_MultiInputSingleOutput(self):
     with self.test_session():
         elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
         initializer = np.array(1.0)
         # Multiply a * 1 each time
         r = tf.scan(lambda a, x: a * (x[0] + x[1]), (elems + 1, -elems), initializer)
         self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], r.eval())
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py


Example 7: MiniminibatchLayer

def MiniminibatchLayer(name, n_in, dim_b, dim_c, group_size, inputs):
    inputs = tf.random_shuffle(inputs)
    inputs = tf.reshape(inputs, [-1, group_size, n_in])
    def f(a,x):
        return MinibatchLayer(name, n_in, dim_b, dim_c, x)
    outputs = tf.scan(f, inputs)
    return tf.reshape(outputs, [-1, n_in+dim_b])
Author: igul222, Project: nn, Lines: 7, Source: speech_rnn_gan.py


Example 8: _marginal_hidden_probs

  def _marginal_hidden_probs(self):
    """Compute marginal pdf for each individual observable."""

    initial_log_probs = tf.broadcast_to(self._log_init,
                                        tf.concat([self.batch_shape_tensor(),
                                                   [self._num_states]],
                                                  axis=0))
    # initial_log_probs :: batch_shape num_states

    if self._num_steps > 1:
      transition_log_probs = self._log_trans

      def forward_step(log_probs, _):
        return _log_vector_matrix(log_probs, transition_log_probs)

      dummy_index = tf.zeros(self._num_steps - 1, dtype=tf.float32)

      forward_log_probs = tf.scan(forward_step, dummy_index,
                                  initializer=initial_log_probs,
                                  name="forward_log_probs")

      forward_log_probs = tf.concat([[initial_log_probs], forward_log_probs],
                                    axis=0)
    else:
      forward_log_probs = initial_log_probs[tf.newaxis, ...]

    # returns :: num_steps batch_shape num_states

    return tf.exp(forward_log_probs)
Author: asudomoeva, Project: probability, Lines: 29, Source: hidden_markov_model.py


Example 9: tensorflow_test

def tensorflow_test():
    import tensorflow as tf
    nested_input = tf.placeholder(tf.float32, shape=[outer_len, inner_len, input_dim])

    variable = tf.Variable(np.float32(1.0))

    def inner_func(curr, prev):
        return curr + prev  # + variable

    def outer_func(curr, prev):
        inner_res = tf.scan(
                fn=inner_func,
                elems=curr,
                initializer=tf.zeros([input_dim])
            )
        return prev + inner_res

    # nested_input.set_shape
    outputs = tf.scan(
            fn=outer_func,
            elems=nested_input,
            initializer=tf.zeros([inner_len, input_dim])
        )

    loss = tf.reduce_sum(outputs)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    # train_op = optimizer.minimize(loss)
    grad = tf.gradients(loss, [variable])

    init_op = tf.initialize_all_variables()

    with tf.Session() as sess:
        sess.run(init_op)
Author: zihangdai, Project: tensorflow_feature_test, Lines: 33, Source: nested_while_loop.py


Example 10: outer_func

 def outer_func(curr, prev):
     inner_res = tf.scan(
             fn=inner_func,
             elems=curr,
             initializer=tf.zeros([input_dim])
         )
     return prev + inner_res
Author: zihangdai, Project: tensorflow_feature_test, Lines: 7, Source: nested_while_loop.py


Example 11: omniglot

def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V at index (:, dim2[:]) with val[:]
        val = tf.cast(val, V.dtype)
        def body(_, (v, d2, chg)):  # Python 2 tuple-parameter syntax
            d2_int = tf.cast(d2, tf.int32)
            # Rebuild the row with element d2_int replaced by chg, keeping the row length.
            return tf.slice(tf.concat_v2([v[:d2_int], [chg], v[d2_int+1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:],
                                            dtype=tf.float32),
                    name="Scan_Update")
        return Z
Author: jayvischeng, Project: NTM-One-Shot-TF, Lines: 25, Source: TestUpd.py


Example 12: build

    def build(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB):
        M, V = preSoftmaxB.shape

        self.preSoftmaxPi = tf.Variable(preSoftmaxPi)
        self.preSoftmaxA = tf.Variable(preSoftmaxA)
        self.preSoftmaxB = tf.Variable(preSoftmaxB)

        pi = tf.nn.softmax(self.preSoftmaxPi)
        A = tf.nn.softmax(self.preSoftmaxA)
        B = tf.nn.softmax(self.preSoftmaxB)

        # define cost
        self.tfx = tf.placeholder(tf.int32, shape=(None,), name='x')
        def recurrence(old_a_old_s, x_t):
            old_a = tf.reshape(old_a_old_s[0], (1, M))
            a = tf.matmul(old_a, A) * B[:, x_t]
            a = tf.reshape(a, (M,))
            s = tf.reduce_sum(a)
            return (a / s), s

        # remember, tensorflow scan is going to loop through
        # all the values!
        # we treat the first value differently than the rest
        # so we only want to loop through tfx[1:]
        # the first scale being 1 doesn't affect the log-likelihood
        # because log(1) = 0
        alpha, scale = tf.scan(
            fn=recurrence,
            elems=self.tfx[1:],
            initializer=(pi*B[:,self.tfx[0]], np.float32(1.0)),
        )

        self.cost = -tf.reduce_sum(tf.log(scale))
        self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost)
Author: cmagnusb, Project: machine_learning_examples, Lines: 34, Source: hmmd_tf.py
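For reference, the recurrence this scan implements is the standard scaled HMM forward algorithm (my own summary, not from the source). With \hat{\alpha}_1 = \pi \odot B_{:,x_1} and s_1 = 1, each step computes

    a_t = (\hat{\alpha}_{t-1} A) \odot B_{:,x_t}, \qquad
    s_t = \sum_i a_t(i), \qquad
    \hat{\alpha}_t = a_t / s_t

The scales telescope so that \prod_t s_t = P(x_{1:T}), which is why the cost above, -\sum_t \log s_t, is exactly the negative log-likelihood.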


Example 13: testScan_MultiInputSameTypeOutput

 def testScan_MultiInputSameTypeOutput(self):
     with self.test_session() as sess:
         elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
         r = tf.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]), (elems, -elems))
         r_value = sess.run(r)
         self.assertAllEqual(np.cumsum(elems), r_value[0])
         self.assertAllEqual(np.cumsum(-elems), r_value[1])
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py


Example 14: cummax

def cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along `axis`. This
    operation is similar to the more classic `cumsum`. Only support 1D Tensor
    for now.

    Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
       axis: A `Tensor` of type `int32` (default: 0).
       reverse: A `bool` (default: False).
       name: A name for the operation (optional).
    Returns:
    A `Tensor`. Has the same type as `x`.
    """
    with ops.name_scope(name, "Cummax", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: the result is non-decreasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
        return cmax
Author: bowrian, Project: SSD-Tensorflow, Lines: 27, Source: math.py
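A quick usage sketch for the cummax helper above (my own example; it assumes the definition above, including its `ops` import, is in scope and a TF 1.x session, with expected values worked out by hand):

    import tensorflow as tf

    x = tf.constant([3.0, 1.0, 4.0, 1.0, 5.0])
    with tf.Session() as sess:
        print(sess.run(cummax(x)))                # [3. 3. 4. 4. 5.]
        print(sess.run(cummax(x, reverse=True)))  # [5. 5. 5. 5. 5.]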


Example 15: testScanUnknownShape

 def testScanUnknownShape(self):
   x = tf.placeholder(tf.float32)
   initializer = tf.placeholder(tf.float32)
   def fn(_, current_input):
     return current_input
   y = tf.scan(fn, x, initializer=initializer)
   self.assertIs(None, y.get_shape().dims)
Author: 285219011, Project: hello-world, Lines: 7, Source: functional_ops_test.py


Example 16: define_ppo_epoch

def define_ppo_epoch(memory, policy_factory, config):
  """PPO epoch."""
  observation, reward, done, action, old_pdf, value = memory

  # This is to avoid propagating gradients through simulation of simulation
  observation = tf.stop_gradient(observation)
  action = tf.stop_gradient(action)
  reward = tf.stop_gradient(reward)
  done = tf.stop_gradient(done)
  value = tf.stop_gradient(value)
  old_pdf = tf.stop_gradient(old_pdf)

  ppo_step_rets = tf.scan(
      lambda _1, _2: define_ppo_step(  # pylint: disable=g-long-lambda
          observation, action, reward, done, value,
          old_pdf, policy_factory, config),
      tf.range(config.optimization_epochs),
      [0., 0., 0., 0., 0., 0.],
      parallel_iterations=1)

  ppo_summaries = [tf.reduce_mean(ret) for ret in ppo_step_rets]
  summaries_names = ["policy_loss", "value_loss", "entropy_loss",
                     "policy_gradient", "value_gradient", "entropy_gradient"]

  summaries = [tf.summary.scalar(summary_name, summary)
               for summary_name, summary in zip(summaries_names, ppo_summaries)]
  losses_summary = tf.summary.merge(summaries)

  for summary_name, summary in zip(summaries_names, ppo_summaries):
    losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ")

  return losses_summary
Author: chqiwang, Project: tensor2tensor, Lines: 32, Source: ppo.py


Example 17: get_states_b

    def get_states_b(self):
        """
        Iterates through time/ sequence to get all hidden state
        """

        all_hidden_states, all_memory_states = self.get_states_f()

        # Reversing the hidden and memory state to get the final hidden and
        # memory state
        last_hidden_states = tf.reverse(
            all_hidden_states, [True, False, False])[0, :, :]
        last_memory_states = tf.reverse(
            all_memory_states, [True, False, False])[0, :, :]

        # For backward pass using the last hidden and memory of the forward
        # pass
        initial_hidden = tf.pack([last_hidden_states, last_memory_states])

        # Getting all hidden states through time
        all_hidden_memory_states = tf.scan(self.Lstm_b,
                                           self.processed_input_rev,
                                           initializer=initial_hidden,
                                           name='states')

        # Now reversing the states to keep them in the original order
        all_hidden_states = tf.reverse(all_hidden_memory_states[
                                       :, 0, :, :], [True, False, False])
        all_memory_states = tf.reverse(all_hidden_memory_states[
                                       :, 1, :, :], [True, False, False])

        return all_hidden_states, all_memory_states
Author: RoGoSo, Project: Tensorflow-tutorial, Lines: 31, Source: bi_directional_lstm.py


Example 18: diagonal_neural_gpu

def diagonal_neural_gpu(inputs, hparams, name=None):
  """Improved Neural GPU as in https://arxiv.org/abs/1702.08727."""
  with tf.variable_scope(name, "diagonal_neural_gpu"):

    def step(state_tup, inp):
      """Single step of the improved Neural GPU."""
      state, _ = state_tup
      x = state
      for layer in xrange(hparams.num_hidden_layers):
        x, new_loss = common_layers.diagonal_conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            dropout=hparams.dropout,
            name="dcgru_%d" % layer)
      # Padding input is zeroed out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state, new_loss

    final_state, losses = tf.scan(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=(inputs, tf.constant(0.0)),
        parallel_iterations=1,
        swap_memory=True)
    return final_state[0, :, :, :, :], 2.0 * tf.reduce_mean(losses)
Author: AranKomat, Project: tensor2tensor, Lines: 26, Source: neural_gpu.py


Example 19: fast_dlstm

    def fast_dlstm(s_t, state_in):
        def dilate_one_time_step(one_h, switcher, num_chunks):
            h_slices = []
            h_size = 256
            chunk_step_size = h_size // num_chunks
            for switch_step, h_step in zip(range(num_chunks), range(0, h_size, chunk_step_size)):
                one_switch = switcher[switch_step]
                h_s = conditional_backprop(one_switch, one_h[h_step: h_step + chunk_step_size])
                h_slices.append(h_s)
            dh = tf.stack(h_slices)
            dh = tf.reshape(dh, [-1, 256])
            return dh

        lstm = rnn.LSTMCell(256, state_is_tuple=True)
        chunks = 8

        def dlstm_scan_fn(previous_output, current_input):
            out, state_out = lstm(current_input, previous_output[1])
            i = previous_output[2]
            basis_i = tf.one_hot(i, depth=chunks)
            state_out_dilated = dilate_one_time_step(tf.squeeze(state_out[0]), basis_i, chunks)
            state_out = rnn.LSTMStateTuple(state_out_dilated, state_out[1])
            i += tf.constant(1)
            new_i = tf.mod(i, chunks)
            return out, state_out, new_i

        rnn_outputs, final_states, mod_idxs = tf.scan(dlstm_scan_fn,
                                                      tf.transpose(s_t, [1, 0, 2]),
                                                      initializer=(
                                                      state_in[1], rnn.LSTMStateTuple(*state_in), tf.constant(0)))

        state_out = [final_states[0][-1, 0, :], final_states[1][-1, 0, :]]
        cell_states = final_states[0][:, 0, :]
        out_states = final_states[1][:, 0, :]
        return out_states, cell_states, state_out
Author: ioanachelu, Project: turi, Lines: 35, Source: test_dlstm.py


Example 20: define_ppo_epoch

def define_ppo_epoch(memory, hparams):
  """PPO epoch."""
  observation, reward, done, action, old_pdf, value = memory

  # This is to avoid propagating gradients through the simulated environment.
  observation = tf.stop_gradient(observation)
  action = tf.stop_gradient(action)
  reward = tf.stop_gradient(reward)
  if hasattr(hparams, "rewards_preprocessing_fun"):
    reward = hparams.rewards_preprocessing_fun(reward)
  done = tf.stop_gradient(done)
  value = tf.stop_gradient(value)
  old_pdf = tf.stop_gradient(old_pdf)

  advantage = calculate_generalized_advantage_estimator(
      reward, value, done, hparams.gae_gamma, hparams.gae_lambda)

  discounted_reward = tf.stop_gradient(advantage + value)

  advantage_mean, advantage_variance = tf.nn.moments(advantage, axes=[0, 1],
                                                     keep_dims=True)
  advantage_normalized = tf.stop_gradient(
      (advantage - advantage_mean)/(tf.sqrt(advantage_variance) + 1e-8))

  add_lists_elementwise = lambda l1, l2: [x + y for x, y in zip(l1, l2)]

  number_of_batches = (hparams.epoch_length * hparams.optimization_epochs
                       / hparams.optimization_batch_size)

  dataset = tf.data.Dataset.from_tensor_slices(
      (observation, action, discounted_reward, advantage_normalized, old_pdf))
  dataset = dataset.shuffle(buffer_size=hparams.epoch_length,
                            reshuffle_each_iteration=True)
  dataset = dataset.repeat(hparams.optimization_epochs)
  dataset = dataset.batch(hparams.optimization_batch_size)
  iterator = dataset.make_initializable_iterator()
  optimizer = get_optimiser(hparams)

  with tf.control_dependencies([iterator.initializer]):
    ppo_step_rets = tf.scan(
        lambda a, i: add_lists_elementwise(  # pylint: disable=g-long-lambda
            a, define_ppo_step(iterator.get_next(), optimizer, hparams)),
        tf.range(number_of_batches),
        [0., 0., 0., 0., 0., 0.],
        parallel_iterations=1)

  ppo_summaries = [tf.reduce_mean(ret) / number_of_batches
                   for ret in ppo_step_rets]
  summaries_names = ["policy_loss", "value_loss", "entropy_loss",
                     "policy_gradient", "value_gradient", "entropy_gradient"]

  summaries = [tf.summary.scalar(summary_name, summary)
               for summary_name, summary in zip(summaries_names, ppo_summaries)]
  losses_summary = tf.summary.merge(summaries)

  for summary_name, summary in zip(summaries_names, ppo_summaries):
    losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ")

  return losses_summary
Author: kltony, Project: tensor2tensor, Lines: 59, Source: ppo.py



Note: The tensorflow.scan examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not repost without permission.

