
Python variables.global_variables_initializer function code examples


This article collects typical usage examples of the Python function tensorflow.python.ops.variables.global_variables_initializer. If you have been wondering what global_variables_initializer does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following presents 20 code examples of the global_variables_initializer function, sorted by popularity by default.
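Before the examples, a note on what this op does: global_variables_initializer returns a single op that groups the initializers of every global variable in the graph. In graph mode, reading a variable before running this op raises a FailedPreconditionError, which is why every example below runs it (via .run() or self.evaluate) right after graph construction. The minimal sketch below illustrates the typical pattern; it uses the public tf.compat.v1 API rather than the internal tensorflow.python.ops.variables module exercised by the tests, so treat it as an illustrative assumption rather than a copy of any example:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # the examples below all run in graph mode

w = tf.Variable(tf.zeros([2, 2]), name="w")
init_op = tf.global_variables_initializer()  # one op grouping all initializers

with tf.Session() as sess:
  sess.run(init_op)   # variables hold no value until this runs
  print(sess.run(w))  # safe to read now; prints a 2x2 zero matrix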

Example 1: testFractionalExampleLabel

  def testFractionalExampleLabel(self):
    # Setup test data with 1 positive, and 1 mostly-negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0.1),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        with self.assertRaisesOpError(
            'Only labels of 0.0 or 1.0 are supported right now.'):
          lr.minimize().run()
Author: Immexxx | Project: tensorflow | Lines: 28 | Source: sdca_ops_test.py


Example 2: testLoad

  def testLoad(self):
    with self.cached_session():
      var = variables.Variable(np.zeros((5, 5), np.float32))
      variables.global_variables_initializer().run()
      var.load(np.ones((5, 5), np.float32))

      self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
Author: aeverall | Project: tensorflow | Lines: 7 | Source: variables_test.py


Example 3: testCondNested

  def testCondNested(self):
    with context.graph_mode(), self.test_session():
      v = resource_variable_ops.ResourceVariable(1.0)
      variables.global_variables_initializer().run()
      p = array_ops.placeholder(dtype=dtypes.bool)
      q = array_ops.placeholder(dtype=dtypes.bool)
      with function.AutomaticControlDependencies() as c:

        def true_fn():
          v.assign(v + 1, name='true')
          return 1.0

        def false_fn():

          def inner_true_fn():
            v.assign(v * 2, name='false_true')
            return 2.0

          def inner_false_fn():
            v.assign(v * 3, name='false_false')
            return 3.0

          control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
          return 1.0

        control_flow_ops.cond(p, true_fn, false_fn)
        with ops.name_scope('final'):
          val = v.read_value()
        val = c.mark_as_return(val)
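      # v is stateful, so the expected values accumulate across the four
      # eval calls below: 1.0 -> x3 = 3.0 -> x2 = 6.0 -> +1 = 7.0 -> +1 = 8.0.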
      self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
      self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
      self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
      self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
Author: StephenOman | Project: tensorflow | Lines: 33 | Source: function_test.py


Example 4: testSharing

  def testSharing(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(3.0)
        # Apply the optimizer twice.  Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())
Author: Huoxubeiyin | Project: tensorflow | Lines: 33 | Source: adagrad_test.py


Example 5: _countUpToTest

  def _countUpToTest(self, dtype):
    with self.cached_session():
      zero = constant_op.constant(0, dtype=dtype)
      var = variables.Variable(zero)
      count_up_to = var.count_up_to(3)

      variables.global_variables_initializer().run()
      self.assertEqual(0, self.evaluate(var))

      self.assertEqual(0, self.evaluate(count_up_to))
      self.assertEqual(1, self.evaluate(var))

      self.assertEqual(1, self.evaluate(count_up_to))
      self.assertEqual(2, self.evaluate(var))

      self.assertEqual(2, self.evaluate(count_up_to))
      self.assertEqual(3, self.evaluate(var))

      with self.assertRaisesOpError("Reached limit of 3"):
        self.evaluate(count_up_to)
      self.assertEqual(3, self.evaluate(var))

      with self.assertRaisesOpError("Reached limit of 3"):
        self.evaluate(count_up_to)
      self.assertEqual(3, self.evaluate(var))
Author: aeverall | Project: tensorflow | Lines: 25 | Source: variables_test.py


Example 6: benchmarkMatrixExponentialOp

  def benchmarkMatrixExponentialOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/cpu:0"):
        matrix = self._GenerateMatrix(shape)
        expm = linalg_impl.matrix_exponential(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(expm),
            min_iters=25,
            name="matrix_exponential_cpu_{shape}".format(
                shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/gpu:0"):
          matrix = self._GenerateMatrix(shape)
          expm = linalg_impl.matrix_exponential(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(expm),
              min_iters=25,
              name="matrix_exponential_gpu_{shape}".format(
                  shape=shape))
Author: AnishShah | Project: tensorflow | Lines: 28 | Source: matrix_exponential_op_test.py


Example 7: testTensorLearningRate

  def testTensorLearningRate(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power = get_beta_accumulators(opt, dtype)

        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()

          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
Author: Wajih-O | Project: tensorflow | Lines: 35 | Source: adamax_test.py


Example 8: testDenseFeaturesWeightedExamples

  def testDenseFeaturesWeightedExamples(self):
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
          weights=[3.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
      # try to increase the margin from (1.0, 0.5). Due to regularization,
      # (1.0, -0.5) will be within the margin. For these points and example
      # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
      # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
      # correct, but the boundary will be much closer to the 2nd point than the
      # first one.
      self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Author: Immexxx | Project: tensorflow | Lines: 32 | Source: sdca_ops_test.py


Example 9: testMetaGraphSaveLoad

  def testMetaGraphSaveLoad(self):
    save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
    save_graph = ops.Graph()
    with save_graph.as_default(), self.test_session(
        graph=save_graph) as session:
      partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
      with variable_scope.variable_scope("root", partitioner=partitioner):
        v0 = variable_scope.get_variable(
            "v0", dtype=dtypes.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1))
        variables.global_variables_initializer().run()

        save_graph.get_collection_ref("partvar").append(v0)
        saver = saver_lib.Saver()
        save_graph.finalize()
        save_path = saver.save(sess=session, save_path=save_prefix)
        previous_value = session.run(
            save_graph.get_tensor_by_name(v0.name + ":0"))

    restore_graph = ops.Graph()
    with restore_graph.as_default(), self.test_session(
        graph=restore_graph) as session:
      saver = saver_lib.import_meta_graph(save_path + ".meta")
      saver.restore(sess=session, save_path=save_path)
      v0, = save_graph.get_collection_ref("partvar")
      self.assertIsInstance(v0, variables.PartitionedVariable)
      self.assertAllEqual(
          previous_value,
          session.run(restore_graph.get_tensor_by_name(v0.name + ":0")))
Author: AnishShah | Project: tensorflow | Lines: 32 | Source: partitioned_variables_test.py


Example 10: testDenseFeaturesPerfectlySeparable

  def testDenseFeaturesPerfectlySeparable(self):
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
          weights=[1.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)

      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())

      # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
      # the SVM's functional margin >=1), so the unregularized loss is ~0.0.
      # There is only loss due to l2-regularization. For these datapoints, it
      # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
Author: Immexxx | Project: tensorflow | Lines: 31 | Source: sdca_ops_test.py


Example 11: testDenseFeaturesSeparableWithinMargins

  def testDenseFeaturesSeparableWithinMargins(self):
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
          weights=[1.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)

      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      # (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
      # are within the margins so there is unregularized loss (1/2 per example).
      # For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
      # gives an L2 loss of ~0.25.
      self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
Author: Immexxx | Project: tensorflow | Lines: 30 | Source: sdca_ops_test.py


Example 12: testDenseFeaturesWithArbitraryWeights

  def testDenseFeaturesWithArbitraryWeights(self):
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
          weights=[20.0, 10.0],
          labels=[10.0, -5.0])
      options = dict(
          symmetric_l2_regularization=5.0,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # The loss function for these particular features is given by:
      # 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
      # \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
      # turns out that the optimal (variable) weights are given by:
      # w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
      # w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
      # In this case the (unnormalized regularized) loss will be:
      # s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
      # actual loss should be further normalized by the sum of example weights.
      self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
Author: Immexxx | Project: tensorflow | Lines: 31 | Source: sdca_ops_test.py
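The closed-form optimum quoted in the comment of Example 12 is easy to verify directly. The snippet below is a standalone arithmetic check (plain Python, not part of the original test) that reproduces both the unnormalized loss 2175/9 and the normalized value 2175/270 asserted above:

# Closed-form optimum from the comment: w_i* = label_i * s_i / (lambda + s_i)
s1, s2 = 20.0, 10.0        # example weights
y1, y2 = 10.0, -5.0        # labels
lam = 5.0                  # symmetric_l2_regularization
w1 = y1 * s1 / (lam + s1)  # = 8.0
w2 = y2 * s2 / (lam + s2)  # = -10/3
loss = (s1 / 2 * (y1 - w1) ** 2 + s2 / 2 * (y2 - w2) ** 2
        + lam / 2 * (w1 ** 2 + w2 ** 2))
assert abs(loss - 2175.0 / 9.0) < 1e-9                 # unnormalized loss
assert abs(loss / (s1 + s2) - 2175.0 / 270.0) < 1e-9   # normalized by weight sum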


Example 13: testDenseFeaturesWithDefaultWeights

  def testDenseFeaturesWithDefaultWeights(self):
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
          weights=[1.0, 1.0],
          labels=[10.0, -5.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()

      # The loss function for these particular features is given by:
      # 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
      # differentiating wrt to w_1, w_2 yields the following optimal values:
      # w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
      # In this case the (unnormalized regularized) loss will be:
      # 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
      # loss should be further normalized by the sum of example weights.
      self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
Author: Immexxx | Project: tensorflow | Lines: 29 | Source: sdca_ops_test.py


Example 14: testOutOfRangeSparseFeatures

  def testOutOfRangeSparseFeatures(self):
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(0, 0)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          loss_type='logistic_loss')

      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      train_op = lr.minimize()
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   'indices.*'):
        train_op.run()
Author: Immexxx | Project: tensorflow | Lines: 27 | Source: sdca_ops_test.py


Example 15: testAdaptiveLearningRate

  def testAdaptiveLearningRate(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)

      def loss():
        return 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop

      sgd = gradient_descent.SGD(1.0)

      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 1 step of sgd through optimizer
      opt_op = sgd.minimize(loss, [var0, var1])
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(opt_op)
      # Validate updated params
      # var0 = [1., 2.] - 1.0 * [5, 5]
      self.assertAllClose([-4., -3.], self.evaluate(var0))
      # var1 = [3., 4.] - 1.0 * [3, 3]
      self.assertAllClose([0., 1.], self.evaluate(var1))

      sgd.learning_rate = 0.5
      if context.executing_eagerly():
        sgd.minimize(loss, [var0, var1])
      else:
        self.evaluate(opt_op)
      # Validate updated params
      # var0 = [-4., -3.] - 0.5 * [5, 5]
      self.assertAllClose([-6.5, -5.5], self.evaluate(var0))
      # var1 = [0., 1.] - 0.5 * [3, 3]
      self.assertAllClose([-1.5, -0.5], self.evaluate(var1))
Author: abhinav-upadhyay | Project: tensorflow | Lines: 34 | Source: optimizer_v2_test.py


Example 16: testConfig

  def testConfig(self):
    opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9, nesterov=True)
    config = opt.get_config()
    opt2 = gradient_descent.SGD.from_config(config)
    lr = opt.lr
    lr2 = opt2.lr
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(lr), self.evaluate(lr2))
    self.assertAllClose(
        self.evaluate(opt._get_hyper("momentum")),
        self.evaluate(opt2._get_hyper("momentum")))
    self.assertAllClose(
        self.evaluate(opt._get_hyper("decay")),
        self.evaluate(opt2._get_hyper("decay")))
    var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
    loss = lambda: 3 * var0
    # learning rate variable created when calling minimize.
    opt.minimize(loss, [var0])
    self.evaluate(variables.global_variables_initializer())
    config = opt.get_config()
    opt3 = gradient_descent.SGD.from_config(config)
    lr3 = opt3.lr
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(lr), self.evaluate(lr3))
    self.assertAllClose(
        self.evaluate(opt._get_hyper("momentum")),
        self.evaluate(opt3._get_hyper("momentum")))
    self.assertAllClose(
        self.evaluate(opt._get_hyper("decay")),
        self.evaluate(opt3._get_hyper("decay")))
    self.assertTrue(opt3.nesterov)
Author: kylin9872 | Project: tensorflow | Lines: 31 | Source: gradient_descent_test.py


Example 17: testGRUCell

  def testGRUCell(self):
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros([1, 2])
        m = array_ops.zeros([1, 2])
        g, _ = rnn_cell_impl.GRUCell(2)(x, m)
        sess.run([variables_lib.global_variables_initializer()])
        res = sess.run(
            [g], {x.name: np.array([[1., 1.]]),
                  m.name: np.array([[0.1, 0.1]])})
        # Smoke test
        self.assertAllClose(res[0], [[0.175991, 0.175991]])
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(0.5)):
        x = array_ops.zeros(
            [1, 3])  # Test GRUCell with input_size != num_units.
        m = array_ops.zeros([1, 2])
        g, _ = rnn_cell_impl.GRUCell(2)(x, m)
        sess.run([variables_lib.global_variables_initializer()])
        res = sess.run(
            [g],
            {x.name: np.array([[1., 1., 1.]]),
             m.name: np.array([[0.1, 0.1]])})
        # Smoke test
        self.assertAllClose(res[0], [[0.156736, 0.156736]])
Author: ggaziv | Project: tensorflow | Lines: 26 | Source: core_rnn_cell_test.py


Example 18: testVariableInput

  def testVariableInput(self):
    with self.test_session():
      v = variable_scope.get_variable(
          'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
      x = core_layers.Dense(1)(v)
      variables.global_variables_initializer().run()
      self.assertAllEqual(x.eval(), [[0.0]])
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: core_test.py


Example 19: testConvertVariablesToConsts

  def testConvertVariablesToConsts(self):
    with ops.Graph().as_default():
      variable_node = variables.Variable(1.0, name="variable_node")
      _ = variables.Variable(1.0, name="unused_variable_node")
      output_node = math_ops_lib.multiply(
          variable_node, 2.0, name="output_node")
      with session.Session() as sess:
        init = variables.initialize_variables([variable_node])
        sess.run(init)
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
        variable_graph_def = sess.graph.as_graph_def()
        # First get the constant_graph_def when variable_names_whitelist is set,
        # note that if variable_names_whitelist is not set an error will be
        # thrown because unused_variable_node is not initialized.
        constant_graph_def = graph_util.convert_variables_to_constants(
            sess,
            variable_graph_def, ["output_node"],
            variable_names_whitelist=set(["variable_node"]))

        # Then initialize the unused variable, and get another
        # constant_graph_def when variable_names_whitelist is not set.
        sess.run(variables.global_variables_initializer())
        constant_graph_def_without_variable_whitelist = (
            graph_util.convert_variables_to_constants(sess, variable_graph_def,
                                                      ["output_node"]))

        # The unused variable should be cleared so the two graphs should be
        # equivalent.
        self.assertEqual(
            str(constant_graph_def),
            str(constant_graph_def_without_variable_whitelist))

        # Test variable name black list. This should result in the variable not
        # being a const.
        sess.run(variables.global_variables_initializer())
        constant_graph_def_with_blacklist = (
            graph_util.convert_variables_to_constants(
                sess,
                variable_graph_def, ["output_node"],
                variable_names_blacklist=set(["variable_node"])))
        variable_node = None
        for node in constant_graph_def_with_blacklist.node:
          if node.name == "variable_node":
            variable_node = node
        self.assertIsNotNone(variable_node)
        self.assertEqual(variable_node.op, "VariableV2")

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      _ = importer.import_graph_def(constant_graph_def, name="")
      self.assertEqual(4, len(constant_graph_def.node))
      for node in constant_graph_def.node:
        self.assertNotEqual("Variable", node.op)
        self.assertNotEqual("VariableV2", node.op)
      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
Author: 1000sprites | Project: tensorflow | Lines: 60 | Source: graph_util_test.py


Example 20: benchmarkCholeskyOp

  def benchmarkCholeskyOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/cpu:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        l = linalg_ops.cholesky(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
                l,),
            min_iters=25,
            name="cholesky_cpu_{shape}".format(shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/device:GPU:0"):
          matrix = variables.Variable(self._GenerateMatrix(shape))
          l = linalg_ops.cholesky(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(
                  l,),
              min_iters=25,
              name="cholesky_gpu_{shape}".format(shape=shape))
Author: AbhinavJain13 | Project: tensorflow | Lines: 28 | Source: cholesky_op_test.py



Note: The tensorflow.python.ops.variables.global_variables_initializer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their original developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.

