Python variables.trainable_variables Function Code Examples


This article compiles typical usage examples of the trainable_variables function from tensorflow.python.ops.variables. If you are unsure how trainable_variables is used in practice, the curated examples below should help.



Twenty code examples of trainable_variables are shown below, ordered by popularity.
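
Before diving into the examples, here is a minimal usage sketch (assuming TF 1.x-style graph mode; the scope and variable names are illustrative, not taken from any example below). trainable_variables() returns every variable added to the TRAINABLE_VARIABLES collection, which get_variable does by default unless trainable=False is passed:

 from tensorflow.python.ops import variable_scope
 from tensorflow.python.ops import variables

 with variable_scope.variable_scope("model"):
   # Added to the TRAINABLE_VARIABLES collection by default.
   w = variable_scope.get_variable("w", shape=[3, 2])
   # trainable=False keeps the variable out of that collection.
   b = variable_scope.get_variable("b", shape=[2], trainable=False)

 print([v.name for v in variables.trainable_variables()])  # ['model/w:0']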

Example 1: testFunctionalConv3DTransposeNoReuse

 def testFunctionalConv3DTransposeNoReuse(self):
   depth, height, width = 5, 7, 9
   volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
   conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
   self.assertEqual(len(variables.trainable_variables()), 2)
   conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
   self.assertEqual(len(variables.trainable_variables()), 4)
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: convolutional_test.py


Example 2: testTensorLearningRateAndMomentum

  def testTensorLearningRateAndMomentum(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        mom_opt = momentum_lib.MomentumOptimizer(
            learning_rate=constant_op.constant(2.0),
            momentum=constant_op.constant(0.9))
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in variables.trainable_variables())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0, so we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([0.1, 0.1]), self.evaluate(slot0))
        self.assertAllCloseAccordingToType(
            np.array([0.01, 0.01]), self.evaluate(slot1))
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
            self.evaluate(var1))
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            self.evaluate(slot0))
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            self.evaluate(slot1))
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([
                1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
            ]), self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([
                2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
            ]), self.evaluate(var1))
Author: Wajih-O | Project: tensorflow | Lines: 60 | Source: momentum_test.py
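
The expected values in Example 2 encode the classic (non-Nesterov) momentum update: accum <- momentum * accum + grad, then var <- var - learning_rate * accum. A plain-Python sketch reproducing the var0 assertions (values taken from the test above):

 lr, mom = 2.0, 0.9
 var, accum, grad = 1.0, 0.0, 0.1
 for _ in range(2):
   accum = mom * accum + grad  # step 1: 0.1, step 2: 0.19
   var -= lr * accum           # step 1: 0.8, step 2: 0.42
 # 0.42 == 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), matching the assertion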


Example 3: testFunctionalConv1DNoReuse

 def testFunctionalConv1DNoReuse(self):
   length = 10
   data = random_ops.random_uniform((5, length, 3), seed=1)
   conv_layers.separable_conv1d(data, 32, 3)
   self.assertEqual(len(variables.trainable_variables()), 3)
   conv_layers.separable_conv1d(data, 32, 3)
   self.assertEqual(len(variables.trainable_variables()), 6)
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: convolutional_test.py


Example 4: testMap_Scoped

  def testMap_Scoped(self):
    with self.cached_session() as sess:

      def double_scoped(x):
        """2x with a dummy 2 that is scoped."""
        with variable_scope.variable_scope("body"):
          # Dummy variable, just to check that scoping works as intended.
          two = variable_scope.get_variable(
              "two", [],
              dtype=dtypes.int32,
              initializer=init_ops.constant_initializer(2))
          return math_ops.multiply(x, two)

      with variable_scope.variable_scope("root") as varscope:
        elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
        doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])

        r = functional_ops.map_fn(double_scoped, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertEqual(variables.trainable_variables()[0].name,
                         "root/body/two:0")
        sess.run([variables.global_variables_initializer()])
        self.assertAllEqual(doubles, self.evaluate(r))

        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = functional_ops.map_fn(double_scoped, elems)
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertAllEqual(doubles, self.evaluate(r))
Author: ThunderQi | Project: tensorflow | Lines: 30 | Source: functional_ops_test.py


Example 5: testFunctionalConv2DTransposeNoReuse

 def testFunctionalConv2DTransposeNoReuse(self):
   height, width = 7, 9
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   conv_layers.conv2d_transpose(images, 32, [3, 3])
   self.assertEqual(len(variables.trainable_variables()), 2)
   conv_layers.conv2d_transpose(images, 32, [3, 3])
   self.assertEqual(len(variables.trainable_variables()), 4)
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: convolutional_test.py


Example 6: testFunctionalDenseTwiceReuse

 def testFunctionalDenseTwiceReuse(self):
   inputs = random_ops.random_uniform((5, 3), seed=1)
   core_layers.dense(inputs, 2, name='my_dense')
   vars1 = variables.trainable_variables()
   core_layers.dense(inputs, 2, name='my_dense', reuse=True)
   vars2 = variables.trainable_variables()
   self.assertEqual(vars1, vars2)
Author: AliMiraftab | Project: tensorflow | Lines: 7 | Source: core_test.py


Example 7: testFunctionalConv2DReuse

 def testFunctionalConv2DReuse(self):
   height, width = 7, 9
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   conv_layers.conv2d(images, 32, [3, 3], name='conv1')
   self.assertEqual(len(variables.trainable_variables()), 2)
   conv_layers.conv2d(images, 32, [3, 3], name='conv1', reuse=True)
   self.assertEqual(len(variables.trainable_variables()), 2)
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: convolutional_test.py


Example 8: testFunctionalDenseTwice

 def testFunctionalDenseTwice(self):
   inputs = random_ops.random_uniform((5, 3), seed=1)
   core_layers.dense(inputs, 2)
   vars1 = variables.trainable_variables()
   core_layers.dense(inputs, 2)
   vars2 = variables.trainable_variables()
   self.assertEqual(len(vars1), 2)
   self.assertEqual(len(vars2), 4)
Author: AliMiraftab | Project: tensorflow | Lines: 8 | Source: core_test.py


Example 9: _CheckDecay

  def _CheckDecay(self, ema, actual_decay, dim):
    tens = _Repeat(10.0, dim)
    thirties = _Repeat(30.0, dim)
    var0 = variables.Variable(tens, name="v0")
    var1 = variables.Variable(thirties, name="v1")
    variables.global_variables_initializer().run()
    # Note that tensor2 is not a Variable but just a plain Tensor resulting
    # from the sum operation.
    tensor2 = var0 + var1
    update = ema.apply([var0, var1, tensor2])
    avg0 = ema.average(var0)
    avg1 = ema.average(var1)
    avg2 = ema.average(tensor2)

    self.assertFalse(avg0 in variables.trainable_variables())
    self.assertFalse(avg1 in variables.trainable_variables())
    self.assertFalse(avg2 in variables.trainable_variables())
    variables.global_variables_initializer().run()

    self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
    self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
    self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)

    # Check initial values.
    self.assertAllClose(tens, var0.eval())
    self.assertAllClose(thirties, var1.eval())
    self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval())

    # Check that averages are initialized correctly.
    self.assertAllClose(tens, avg0.eval())
    self.assertAllClose(thirties, avg1.eval())
    # Note that averages of plain Tensors initialize to zeros_like, since no
    # value of the Tensor is known yet (the op has not been run).
    self.assertAllClose(_Repeat(0.0, dim), avg2.eval())

    # Update the averages and check.
    update.run()
    dk = actual_decay

    expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk), dim)
    self.assertAllClose(expected, avg2.eval())

    # Again, update the averages and check.
    update.run()
    expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
                        (10.0 + 30.0) * (1 - dk)),
                       dim)
    self.assertAllClose(expected, avg2.eval())
Author: ray2020 | Project: tensorflow | Lines: 58 | Source: moving_averages_test.py
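
The expected values in Example 9 follow the standard exponential-moving-average recurrence avg <- dk * avg + (1 - dk) * value, where averages of Variables start at the Variable's initial value and averages of plain Tensors start at zero. A numeric sketch (dk = 0.9 is an illustrative decay, not a value from the test):

 def ema_step(avg, value, dk):
   return dk * avg + (1.0 - dk) * value

 dk = 0.9
 avg0 = ema_step(10.0, 10.0, dk)         # stays 10.0 while var0 is unchanged
 avg2 = ema_step(0.0, 10.0 + 30.0, dk)   # 0.0 * dk + 40.0 * (1 - dk) = 4.0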


Example 10: testFunctionalConv3DTransposeReuseFromScope

 def testFunctionalConv3DTransposeReuseFromScope(self):
   with variable_scope.variable_scope('scope'):
     depth, height, width = 5, 7, 9
     volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
     conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
     self.assertEqual(len(variables.trainable_variables()), 2)
   with variable_scope.variable_scope('scope', reuse=True):
     conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
     self.assertEqual(len(variables.trainable_variables()), 2)
Author: AndrewTwinz | Project: tensorflow | Lines: 9 | Source: convolutional_test.py


Example 11: testFunctionalConv1DReuseFromScope

 def testFunctionalConv1DReuseFromScope(self):
   with variable_scope.variable_scope('scope'):
     length = 10
     data = random_ops.random_uniform((5, length, 3), seed=1)
     conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
     self.assertEqual(len(variables.trainable_variables()), 3)
   with variable_scope.variable_scope('scope', reuse=True):
     conv_layers.separable_conv1d(data, 32, 3, name='sepconv1')
     self.assertEqual(len(variables.trainable_variables()), 3)
Author: AndrewTwinz | Project: tensorflow | Lines: 9 | Source: convolutional_test.py


Example 12: testFunctionalConv2DTransposeReuseFromScope

 def testFunctionalConv2DTransposeReuseFromScope(self):
   with variable_scope.variable_scope('scope'):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
     conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
     self.assertEqual(len(variables.trainable_variables()), 2)
   with variable_scope.variable_scope('scope', reuse=True):
     conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
     self.assertEqual(len(variables.trainable_variables()), 2)
Author: AndrewTwinz | Project: tensorflow | Lines: 9 | Source: convolutional_test.py


Example 13: testTimeReversedFusedRNN

  def testTimeReversedFusedRNN(self):
    with self.test_session() as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=19890213)
      fw_cell = core_rnn_cell_impl.BasicRNNCell(10)
      bw_cell = core_rnn_cell_impl.BasicRNNCell(10)
      batch_size = 5
      input_size = 20
      timelen = 15
      inputs = constant_op.constant(
          np.random.randn(timelen, batch_size, input_size))

      # test bi-directional rnn
      with variable_scope.variable_scope("basic", initializer=initializer):
        unpacked_inputs = array_ops.unstack(inputs)
        outputs, fw_state, bw_state = core_rnn.static_bidirectional_rnn(
            fw_cell, bw_cell, unpacked_inputs, dtype=dtypes.float64)
        packed_outputs = array_ops.stack(outputs)
        basic_vars = [
            v for v in variables.trainable_variables()
            if v.name.startswith("basic/")
        ]
        sess.run([variables.global_variables_initializer()])
        basic_outputs, basic_fw_state, basic_bw_state = sess.run(
            [packed_outputs, fw_state, bw_state])
        basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
        basic_wgrads = sess.run(
            gradients_impl.gradients(packed_outputs, basic_vars))

      with variable_scope.variable_scope("fused", initializer=initializer):
        fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
            core_rnn_cell_impl.BasicRNNCell(10))
        fused_bw_cell = fused_rnn_cell.TimeReversedFusedRNN(
            fused_rnn_cell.FusedRNNCellAdaptor(
                core_rnn_cell_impl.BasicRNNCell(10)))
        fw_outputs, fw_state = fused_cell(
            inputs, dtype=dtypes.float64, scope="fw")
        bw_outputs, bw_state = fused_bw_cell(
            inputs, dtype=dtypes.float64, scope="bw")
        outputs = array_ops.concat([fw_outputs, bw_outputs], 2)
        fused_vars = [
            v for v in variables.trainable_variables()
            if v.name.startswith("fused/")
        ]
        sess.run([variables.global_variables_initializer()])
        fused_outputs, fused_fw_state, fused_bw_state = sess.run(
            [outputs, fw_state, bw_state])
        fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
        fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))

      self.assertAllClose(basic_outputs, fused_outputs)
      self.assertAllClose(basic_fw_state, fused_fw_state)
      self.assertAllClose(basic_bw_state, fused_bw_state)
      self.assertAllClose(basic_grads, fused_grads)
      for basic, fused in zip(basic_wgrads, fused_wgrads):
        self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
Author: AlbertXiebnu | Project: tensorflow | Lines: 56 | Source: fused_rnn_cell_test.py


Example 14: _rnn_get_variable

 def _rnn_get_variable(self, getter, *args, **kwargs):
   variable = getter(*args, **kwargs)
   trainable = (variable in tf_variables.trainable_variables() or
                (isinstance(variable, tf_variables.PartitionedVariable) and
                 list(variable)[0] in tf_variables.trainable_variables()))
   if trainable and variable not in self._trainable_weights:
     self._trainable_weights.append(variable)
   elif not trainable and variable not in self._non_trainable_weights:
     self._non_trainable_weights.append(variable)
   return variable
Author: AlbertXiebnu | Project: tensorflow | Lines: 10 | Source: rnn_cell_impl.py


Example 15: testFunctionalDenseTwiceReuseFromScope

 def testFunctionalDenseTwiceReuseFromScope(self):
   with self.test_session():
     with variable_scope.variable_scope('scope'):
       inputs = random_ops.random_uniform((5, 3), seed=1)
       core_layers.dense(inputs, 2, name='my_dense')
       vars1 = variables.trainable_variables()
     with variable_scope.variable_scope('scope', reuse=True):
       core_layers.dense(inputs, 2, name='my_dense')
       vars2 = variables.trainable_variables()
     self.assertEqual(vars1, vars2)
Author: AndrewTwinz | Project: tensorflow | Lines: 10 | Source: core_test.py


Example 16: compute_gradients

  def compute_gradients(self,
                        loss,
                        var_list=None,
                        gate_gradients=optimizer.Optimizer.GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False,
                        grad_loss=None):
    """Compute gradients of `loss` for the variables in `var_list`.

    Adds rho * elastic_difference to the loss to control exploration. This is
    the first part of `minimize()`.  It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable".  Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.

    Args:
      loss: A Tensor containing the value to minimize.
      var_list: Optional list or tuple of `tf.Variable` to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph under
        the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with the
        corresponding op.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.

    Returns:
      A list of (gradient, variable) pairs. Variable is always present, but
      gradient can be `None`.

    Raises:
      TypeError: If `var_list` contains anything other than `Variable` objects.
      ValueError: If some arguments are invalid.
    """
    if not var_list:
      var_list = variables.trainable_variables()

    elastic_difference = [
        math_ops.subtract(v, lv)
        for v, lv in zip(variables.trainable_variables(),
                         [self._local_map[var] for var in var_list])
    ]

    distance_loss = self._rho * math_ops.add_n(
        [gen_nn_ops.l2_loss(ed) for ed in elastic_difference])

    total_loss = loss + distance_loss
    return self._opt.compute_gradients(total_loss, var_list, gate_gradients,
                                       aggregation_method,
                                       colocate_gradients_with_ops, grad_loss)
Author: ahmedsaiduk | Project: tensorflow | Lines: 53 | Source: elastic_average_optimizer.py
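
To make the docstring's penalty concrete: the optimizer adds rho * sum_i l2_loss(v_i - lv_i) to the loss, pulling each global variable v toward its local copy lv. A standalone sketch of just that term (elastic_penalty is an illustrative helper, not part of the optimizer's API; gen_nn_ops.l2_loss(t) computes sum(t ** 2) / 2):

 from tensorflow.python.ops import gen_nn_ops
 from tensorflow.python.ops import math_ops

 def elastic_penalty(rho, global_vars, local_vars):
   diffs = [math_ops.subtract(v, lv) for v, lv in zip(global_vars, local_vars)]
   return rho * math_ops.add_n([gen_nn_ops.l2_loss(d) for d in diffs])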


Example 17: _rnn_get_variable

 def _rnn_get_variable(self, getter, *args, **kwargs):
   variable = getter(*args, **kwargs)
   if context.in_graph_mode():
     trainable = (variable in tf_variables.trainable_variables() or
                  (isinstance(variable, tf_variables.PartitionedVariable) and
                   list(variable)[0] in tf_variables.trainable_variables()))
   else:
     trainable = variable._trainable  # pylint: disable=protected-access
   if trainable and variable not in self._trainable_weights:
     self._trainable_weights.append(variable)
   elif not trainable and variable not in self._non_trainable_weights:
     self._non_trainable_weights.append(variable)
   return variable
Author: AbhinavJain13 | Project: tensorflow | Lines: 13 | Source: rnn_cell_impl.py


Example 18: testCollectionsWithScope

  def testCollectionsWithScope(self):
    with self.cached_session():
      with ops.name_scope("scope_1"):
        var_x = variables.VariableV1(2.0)
      with ops.name_scope("scope_2"):
        var_y = variables.VariableV1(2.0)

      self.assertEqual([var_x, var_y], variables.global_variables())
      self.assertEqual([var_x], variables.global_variables("scope_1"))
      self.assertEqual([var_y], variables.global_variables("scope_2"))

      self.assertEqual([var_x, var_y], variables.trainable_variables())
      self.assertEqual([var_x], variables.trainable_variables("scope_1"))
      self.assertEqual([var_y], variables.trainable_variables("scope_2"))
Author: adit-chandra | Project: tensorflow | Lines: 14 | Source: variables_test.py


Example 19: testLayerBasic

  def testLayerBasic(self):
    num_layers = 4
    num_units = 2
    batch_size = 8
    direction = CUDNN_RNN_UNIDIRECTION
    dir_count = 1

    with vs.variable_scope("main"):
      kernel_initializer = init_ops.constant_initializer(0.)
      bias_initializer = init_ops.constant_initializer(0.)
      inputs = random_ops.random_uniform([
          num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)

      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")

      # Build the layer
      outputs1, _ = lstm(inputs)
      # Reuse the layer
      outputs2, _ = lstm(inputs)

      total_sum1 = math_ops.reduce_sum(outputs1)
      total_sum2 = math_ops.reduce_sum(outputs2)

    with vs.variable_scope("main", reuse=True):
      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")

      # Reuse the layer
      outputs3, _ = lstm(inputs)
      total_sum3 = math_ops.reduce_sum(outputs3)

    self.assertEqual(1, len(variables.trainable_variables()))
    self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)))
    self.assertEqual("main/awesome_lstm/opaque_kernel",
                     variables.trainable_variables()[0].op.name)

    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      (total_sum1_v, total_sum2_v, total_sum3_v) = sess.run(
          [total_sum1, total_sum2, total_sum3])
      self.assertEqual(0, total_sum1_v)
      self.assertEqual(0, total_sum2_v)
      self.assertEqual(0, total_sum3_v)
Author: AnddyWang | Project: tensorflow | Lines: 50 | Source: cudnn_rnn_test.py


Example 20: _create_multi_lstm_cell_ops

def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
                                num_layers, max_time, compiled):
  with variable_scope.variable_scope(
      "root",
      initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
    inputs = variable_scope.get_variable(
        "inputs", initializer=random_ops.random_uniform(
            (max_time, batch_size, input_depth), seed=1))
    maybe_xla = lambda c: rnn_cell.CompiledWrapper(c) if compiled else c
    cell = core_rnn_cell_impl.MultiRNNCell(
        [maybe_xla(core_rnn_cell_impl.LSTMCell(num_units))
         for _ in range(num_layers)])
    initial_state = cell.zero_state(
        batch_size=batch_size, dtype=dtypes.float32)
    outputs, final_state = rnn.dynamic_rnn(
        cell=cell, inputs=inputs, initial_state=initial_state,
        time_major=True)
    flat_final_state = nest.flatten(final_state)
    trainable_variables = variables.trainable_variables()
    outputs_grad = gradients_impl.gradients(
        [outputs],
        trainable_variables + [inputs] + nest.flatten(initial_state))
    final_state_grad = gradients_impl.gradients(
        flat_final_state,
        trainable_variables + [inputs] + nest.flatten(initial_state))

    return {"outputs": outputs,
            "final_state": flat_final_state,
            "outputs_grad": outputs_grad,
            "final_state_grad": final_state_grad}
Author: Jackhuang945 | Project: tensorflow | Lines: 30 | Source: rnn_cell_test.py



Note: The tensorflow.python.ops.variables.trainable_variables examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects; copyright remains with the original authors. Follow each project's license when using or redistributing the code, and do not reproduce this compilation without permission.

