• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python variables.get_variables_by_name函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中tensorflow.contrib.framework.python.ops.variables.get_variables_by_name函数的典型用法代码示例。如果您正苦于以下问题:Python get_variables_by_name函数的具体用法?Python get_variables_by_name怎么用?Python get_variables_by_name使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了get_variables_by_name函数的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: testGetVariableGivenNameScoped

 def testGetVariableGivenNameScoped(self):
   """Variables created inside a scope are found by their bare names."""
   with self.test_session():
     with variable_scope.variable_scope('A'):
       # Two variables under scope 'A'; each must be retrievable by its
       # unqualified name.
       var_a = variables_lib2.variable('a', [5])
       var_b = variables_lib2.variable('b', [5])
       for name, expected in (('a', var_a), ('b', var_b)):
         self.assertEquals([expected],
                           variables_lib2.get_variables_by_name(name))
开发者ID:AliMiraftab,项目名称:tensorflow,代码行数:7,代码来源:variables_test.py


示例2: testEmptyUpdateOps

  def testEmptyUpdateOps(self):
    """Passing update_ops=[] to create_train_op skips batch-norm updates.

    Builds a batch-norm classifier, trains it for 10 steps with an empty
    update-op list, and asserts the moving statistics keep their initial
    values (mean 0, variance 1), proving the moving-average update ops
    were not run.
    """
    with ops.Graph().as_default():
      # Fixed seed so graph-level randomness is deterministic.
      random_seed.set_random_seed(0)
      # NOTE(review): self._inputs / self._labels are fixtures defined on the
      # test class (not visible here); assumed to have 4 feature columns
      # given the [0] * 4 assertions below — confirm against the class setup.
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      # Empty update_ops: the batch-norm moving-average ops are deliberately
      # excluded from the train op.
      train_op = training.create_train_op(total_loss, optimizer, update_ops=[])

      # Fetch the batch-norm statistics variables created by the classifier.
      moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib.get_variables_by_name('moving_variance')[
          0]

      with session_lib.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib2.global_variables_initializer())
        mean, variance = sess.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          sess.run([train_op])
        mean = moving_mean.eval()
        variance = moving_variance.eval()

        # Since we skip update_ops the moving_vars are not updated.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)
开发者ID:Jackhuang945,项目名称:tensorflow,代码行数:33,代码来源:training_test.py


示例3: testCreateVariables

 def testCreateVariables(self):
   """instance_norm with center and scale creates beta and gamma."""
   inputs = random_ops.random_uniform((5, 3, 3, 3), seed=1)
   normalization.instance_norm(inputs, center=True, scale=True)
   # Both parameters should be created under the default 'InstanceNorm' scope.
   for param in ('beta', 'gamma'):
     created = contrib_variables.get_variables_by_name(param)[0]
     self.assertEqual('InstanceNorm/' + param, created.op.name)
开发者ID:BhaskarNallani,项目名称:tensorflow,代码行数:8,代码来源:normalization_test.py


示例4: testReuseVariables

 def testReuseVariables(self):
   """Applying instance_norm twice with reuse=True creates no duplicates."""
   inputs = random_ops.random_uniform((5, 3, 3, 3), seed=1)
   normalization.instance_norm(inputs, scale=True, scope='IN')
   normalization.instance_norm(inputs, scale=True, scope='IN', reuse=True)
   # Exactly one beta and one gamma exist despite two layer applications.
   for param in ('beta', 'gamma'):
     self.assertEqual(1, len(contrib_variables.get_variables_by_name(param)))
开发者ID:BhaskarNallani,项目名称:tensorflow,代码行数:9,代码来源:normalization_test.py


示例5: testCreateOpNoScaleCenter

 def testCreateOpNoScaleCenter(self):
   """group_norm without center/scale creates no beta/gamma variables."""
   height, width, groups = 3, 3, 7
   inputs = random_ops.random_uniform(
       (5, height, width, 3 * groups), dtype=dtypes.float32, seed=1)
   result = normalization.group_norm(
       inputs, groups=groups, center=False, scale=False)
   # The op preserves the input shape and creates no trainable parameters.
   self.assertListEqual([5, height, width, 3 * groups], result.shape.as_list())
   for param in ('beta', 'gamma'):
     self.assertEqual(0, len(contrib_variables.get_variables_by_name(param)))
开发者ID:BhaskarNallani,项目名称:tensorflow,代码行数:9,代码来源:normalization_test.py


示例6: testGetVariableWithoutScope

 def testGetVariableWithoutScope(self):
   """Matching is on the final path component, across all scopes."""
   with self.test_session():
     var_a = variables_lib2.variable('a', [5])
     var_fooa = variables_lib2.variable('fooa', [5])
     var_b_a = variables_lib2.variable('B/a', [5])
     # 'a' matches both the top-level 'a' and 'B/a' — but not 'fooa'.
     self.assertEquals([var_a, var_b_a],
                       variables_lib2.get_variables_by_name('a'))
     self.assertEquals([var_fooa],
                       variables_lib2.get_variables_by_name('fooa'))
开发者ID:AliMiraftab,项目名称:tensorflow,代码行数:9,代码来源:variables_test.py


示例7: testCreateVariables_NCHW

 def testCreateVariables_NCHW(self):
   """group_norm in NCHW layout still creates beta and gamma."""
   height, width, groups = 3, 3, 4
   inputs = random_ops.random_uniform((5, 2 * groups, height, width), seed=1)
   # Channels at axis -3; reduce over the trailing two spatial axes.
   normalization.group_norm(inputs, groups=4,
                            channels_axis=-3, reduction_axes=(-2, -1),
                            center=True, scale=True)
   for param in ('beta', 'gamma'):
     found = contrib_variables.get_variables_by_name(param)[0]
     self.assertEqual('GroupNorm/' + param, found.op.name)
开发者ID:BhaskarNallani,项目名称:tensorflow,代码行数:10,代码来源:normalization_test.py


示例8: testGetVariableWithScope

 def testGetVariableWithScope(self):
   """get_variables_by_name matches across scopes and honors `scope`."""
   with self.test_session():
     with variable_scope.variable_scope('A'):
       var_a = variables_lib2.variable('a', [5])
       var_fooa = variables_lib2.variable('fooa', [5])
     with variable_scope.variable_scope('B'):
       var_a2 = variables_lib2.variable('a', [5])
     # A bare name matches the variable in every scope.
     self.assertEquals([var_a, var_a2],
                       variables_lib2.get_variables_by_name('a'))
     self.assertEquals([var_fooa],
                       variables_lib2.get_variables_by_name('fooa'))
     # A leading slash never matches anything.
     self.assertEquals([], variables_lib2.get_variables_by_name('/a'))
     # Restricting to scope 'A' narrows the match to that scope only.
     self.assertEquals([var_a],
                       variables_lib2.get_variables_by_name('a', scope='A'))
开发者ID:AliMiraftab,项目名称:tensorflow,代码行数:15,代码来源:variables_test.py


示例9: testTrainAllVarsHasLowerLossThanTrainSubsetOfVars

  def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
    """Training all variables reaches lower loss than training subsets.

    Three phases share one logdir so each phase resumes from the previous
    checkpoint: (1) train only 'weights', (2) train only 'biases', then
    (3) train everything. Phases 1-2 must plateau in (.015, .05); phase 3
    must drop below .015.
    """
    logdir1 = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')

    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      # NOTE(review): ModelLoss is defined on the test class (not visible
      # here); assumed to build the model graph and return its total loss.
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights = variables_lib2.get_variables_by_name('weights')

      # variables_to_train restricts gradient updates to the weights only.
      train_op = learning.create_train_op(
          total_loss, optimizer, variables_to_train=weights)

      loss = learning.train(
          train_op, logdir1, number_of_steps=200, log_every_n_steps=10)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Next, train the biases of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      biases = variables_lib2.get_variables_by_name('biases')

      train_op = learning.create_train_op(
          total_loss, optimizer, variables_to_train=biases)

      loss = learning.train(
          train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Finally, train both weights and bias to get lower loss.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      # No variables_to_train: all trainable variables are updated.
      train_op = learning.create_train_op(total_loss, optimizer)
      loss = learning.train(
          train_op, logdir1, number_of_steps=400, log_every_n_steps=10)

      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
开发者ID:AndrewTwinz,项目名称:tensorflow,代码行数:46,代码来源:learning_test.py


示例10: testUseUpdateOps

  def testUseUpdateOps(self):
    """Default create_train_op runs the batch-norm moving-average updates.

    Counterpart to testEmptyUpdateOps: with no update_ops override, training
    10 steps must move the batch-norm statistics from their initial values
    (mean 0, variance 1) to the empirical input statistics.
    """
    with ops.Graph().as_default():
      # Fixed seed so graph-level randomness is deterministic.
      random_seed.set_random_seed(0)
      # NOTE(review): self._inputs / self._labels are class fixtures (not
      # visible here); the [0] * 4 assertions below imply 4 feature columns.
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      # Expected statistics are computed directly from the numpy inputs;
      # _addBesselsCorrection adjusts the variance for sample size 16.
      expected_mean = np.mean(self._inputs, axis=(0))
      expected_var = np.var(self._inputs, axis=(0))
      expected_var = self._addBesselsCorrection(16, expected_var)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      # Default update_ops: batch-norm moving-average ops run with train_op.
      train_op = learning.create_train_op(total_loss, optimizer)

      moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
      moving_variance = variables_lib2.get_variables_by_name('moving_variance')[
          0]

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())
        mean, variance = sess.run([moving_mean, moving_variance])
        # After initialization moving_mean == 0 and moving_variance == 1.
        self.assertAllClose(mean, [0] * 4)
        self.assertAllClose(variance, [1] * 4)

        for _ in range(10):
          sess.run([train_op])
        mean = moving_mean.eval()
        variance = moving_variance.eval()
        # After 10 updates with decay 0.1 moving_mean == expected_mean and
        # moving_variance == expected_var.
        self.assertAllClose(mean, expected_mean)
        self.assertAllClose(variance, expected_var)
开发者ID:AndrewTwinz,项目名称:tensorflow,代码行数:37,代码来源:learning_test.py


示例11: testTrainAllVarsHasLowerLossThanTrainSubsetOfVars

  def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
    """Hook-based variant: training all vars beats training subsets.

    Same three-phase structure as the learning-library version, but drives
    training with tf.training.train plus CheckpointSaverHook/StopAtStepHook
    instead of number_of_steps. All phases share one logdir so each resumes
    from the previous checkpoint; phases 1-2 must plateau in (.015, .05),
    phase 3 must drop below .015.
    """
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
    if gfile.Exists(logdir):  # For running on jenkins.
      gfile.DeleteRecursively(logdir)

    # First, train only the weights of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      # NOTE(review): ModelLoss is defined on the test class (not visible
      # here); assumed to build the model graph and return its total loss.
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      weights = variables_lib.get_variables_by_name('weights')

      # variables_to_train restricts gradient updates to the weights only.
      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=weights)

      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=1, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=200),
          ])
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Next, train the biases of the model.
    with ops.Graph().as_default():
      random_seed.set_random_seed(1)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      biases = variables_lib.get_variables_by_name('biases')

      train_op = training.create_train_op(
          total_loss, optimizer, variables_to_train=biases)

      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=1, saver=saver),
              # StopAtStepHook counts the global step, which resumed from
              # the phase-1 checkpoint — so this phase runs 100 more steps.
              basic_session_run_hooks.StopAtStepHook(num_steps=300),
          ])
      self.assertGreater(loss, .015)
      self.assertLess(loss, .05)

    # Finally, train both weights and bias to get lower loss.
    with ops.Graph().as_default():
      random_seed.set_random_seed(2)
      total_loss = self.ModelLoss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      # No variables_to_train: all trainable variables are updated.
      train_op = training.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()
      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=1, saver=saver),
              basic_session_run_hooks.StopAtStepHook(num_steps=400),
          ])
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
开发者ID:Jackhuang945,项目名称:tensorflow,代码行数:67,代码来源:training_test.py



注:本文中的tensorflow.contrib.framework.python.ops.variables.get_variables_by_name函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python variables.local_variable函数代码示例发布时间:2022-05-27
下一篇:
Python variables.get_trainable_variables函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap