Python variable_scope.variable Function: Code Examples


This article collects typical usage examples of the variable function from tensorflow.python.ops.variable_scope in Python. If you have been wondering what exactly the variable function does, how to call it, or where to find working examples of it, the curated code samples below should help.



A total of 20 code examples of the variable function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
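
Before diving into the examples, here is a minimal sketch of the basic call, assuming TensorFlow 1.x graph mode (variable_scope.variable is an internal API whose keyword set varies across versions; the names below are illustrative):

import tensorflow as tf
from tensorflow.python.ops import variable_scope

# Create a non-trainable scalar counter through the internal creator.
step = variable_scope.variable(
    0,                  # initial_value
    name="step",
    dtype=tf.int32,
    trainable=False)    # keep it out of TRAINABLE_VARIABLES

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(step))  # -> 0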

Example 1: _create_slots

    def _create_slots(self, var_list):
        # Create the beta1 and beta2 accumulators on the same device as the first
        # variable. Sort the var_list to make sure this device is consistent across
        # workers (these need to go on the same PS, otherwise some updates are
        # silently ignored).
        first_var = min(var_list, key=lambda x: x.name)

        create_new = self._iterations is None
        if not create_new and context.in_graph_mode():
            create_new = (self._iterations.graph is not first_var.graph)

        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = variable_scope.variable(self._beta1,
                                                            name="beta1_power",
                                                            trainable=False)
                self._beta2_power = variable_scope.variable(self._beta2,
                                                            name="beta2_power",
                                                            trainable=False)
                self._iterations = variable_scope.variable(0.,
                                                           name="iterations",
                                                           trainable=False)
                self._m_schedule = variable_scope.variable(1.,
                                                           name="m_schedule",
                                                           trainable=False)
        # Create slots for the first and second moments.
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
Developer: jkhlot, Project: tensorflow-XNN, Lines: 29, Source: optimizer.py


Example 2: __call__

  def __call__(self, getter, name, trainable, collections, *args, **kwargs):
    if trainable:
      with ops.device(self._worker_device):
        local_var = getter(name, trainable=True,
                           collections=[ops.GraphKeys.LOCAL_VARIABLES], 
                           *args, **kwargs)
        
      global_center_variable = variable_scope.variable(
          name='%s/%s' % (GLOBAL_VARIABLE_NAME, name),
          initial_value=local_var.initialized_value(),
          trainable=False,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES])

      with ops.device(self._worker_device):
        local_center_variable = variable_scope.variable(
          name='%s/%s' % (LOCAL_VARIABLE_NAME, name),
          initial_value=local_var.initialized_value(),
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES])
        
      self._local_map[local_var] = local_center_variable
      self._global_map[local_var] = global_center_variable
      return local_var
    else:
      return getter(name, trainable, collections, *args, **kwargs)
Developer: Kongsea, Project: tensorflow, Lines: 27, Source: elastic_average_optimizer.py
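
The callable above is meant to be installed as a custom variable getter. As a hedged sketch (standard TF 1.x API; the scope name, shape, and `ea_custom` instance are illustrative, not taken from elastic_average_optimizer.py), it is typically wired in through variable_scope's custom_getter argument:

from tensorflow.python.ops import variable_scope

# `ea_custom` is assumed to be an instance of the callable class above.
with variable_scope.variable_scope("model", custom_getter=ea_custom):
    w = variable_scope.get_variable("w", shape=[784, 10])
# `w` is the worker-local variable; its global and local center copies
# were registered in ea_custom's _global_map / _local_map.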


Example 3: __init__

  def __init__(self,
               init_loss_scale,
               incr_every_n_steps,
               decr_every_n_nan_or_inf=2,
               incr_ratio=2,
               decr_ratio=0.8):
    """Constructor of exponential-update loss scale manager.

    Args:
      init_loss_scale: A Python float.  The loss scale to use at the beginning.
      incr_every_n_steps: Increases loss scale every n consecutive steps with
        finite gradients.
      decr_every_n_nan_or_inf: Decreases loss scale every n accumulated steps
        with nan or inf gradients.
      incr_ratio: The multiplier to use when increasing the loss scale.
      decr_ratio: The less-than-one-multiplier to use when decreasing the loss
        scale.
    """
    self._incr_every_n_steps = incr_every_n_steps
    self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf
    self._incr_ratio = incr_ratio
    self._decr_ratio = decr_ratio
    self._loss_scale = variable_scope.variable(
        name="loss_scale",
        initial_value=ops.convert_to_tensor(init_loss_scale, dtypes.float32),
        dtype=dtypes.float32,
        trainable=False)
    self._num_good_steps = variable_scope.variable(
        name="good_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
    self._num_bad_steps = variable_scope.variable(
        name="bad_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
Developer: BhaskarNallani, Project: tensorflow, Lines: 31, Source: loss_scale_manager.py
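
The three variables created above back a simple counting rule. As a pure-Python sketch of that rule (an illustration of the intent, not the class's actual update ops):

def simulate_loss_scale(finite_steps, init_loss_scale=2.0 ** 15,
                        incr_every_n_steps=1000, incr_ratio=2,
                        decr_every_n_nan_or_inf=2, decr_ratio=0.8):
    """Illustrative only: mirrors the good_steps/bad_steps counters above."""
    scale, good, bad = init_loss_scale, 0, 0
    for is_finite in finite_steps:
        if is_finite:
            good += 1
            if good >= incr_every_n_steps:    # enough consecutive good steps
                scale *= incr_ratio
                good = bad = 0
        else:                                 # nan/inf gradient observed
            bad += 1
            good = 0
            if bad >= decr_every_n_nan_or_inf:
                scale *= decr_ratio
                bad = 0
    return scale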


Example 4: model_fn

 def model_fn():
   vs = []
   vs.append(variable_scope.variable(1.0, name="foo/bar"))
   vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
   vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
   vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
   distribute_lib.get_tower_context().merge_call(lambda _: _)
   return vs
Developer: Jackiefan, Project: tensorflow, Lines: 8, Source: mirrored_strategy_multigpu_test.py


Example 5: testInvalidSynchronizationWithVariable

 def testInvalidSynchronizationWithVariable(self):
   self._skip_eager_if_gpus_less_than(1)
   devices = ["/device:CPU:0", "/device:GPU:0"]
   dist = mirrored_strategy.MirroredStrategy(devices)
   with dist.scope():
     with self.assertRaisesRegexp(
         ValueError, "Invalid variable synchronization mode: Invalid for "
         "variable: v"):
       variable_scope.variable(1.0, name="v", synchronization="Invalid")
Developer: ChristinaEricka, Project: tensorflow, Lines: 9, Source: mirrored_strategy_multigpu_test.py


Example 6: testNoneSynchronizationWithVariable

 def testNoneSynchronizationWithVariable(self):
   self._skip_eager_if_gpus_less_than(1)
   devices = ["/device:CPU:0", "/device:GPU:0"]
   dist = mirrored_strategy.MirroredStrategy(devices)
   with dist.scope():
     with self.assertRaisesRegexp(
         ValueError, "`NONE` variable synchronization mode is not "
         "supported with `Mirrored` distribution strategy. Please change "
         "the `synchronization` for variable: v"):
       variable_scope.variable(
           1.0,
           name="v",
           synchronization=variable_scope.VariableSynchronization.NONE)
Developer: ChristinaEricka, Project: tensorflow, Lines: 13, Source: mirrored_strategy_multigpu_test.py
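
For contrast with the two failing cases above, here is a combination MirroredStrategy does accept inside dist.scope(); this mirrors Example 11 below:

with dist.scope():
    v = variable_scope.variable(
        1.0,
        name="v",
        synchronization=variable_scope.VariableSynchronization.ON_READ,
        aggregation=variable_scope.VariableAggregation.SUM)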


Example 7: _create_slots

    def _create_slots(self, var_list):
        first_var = min(var_list, key=lambda x: x.name)

        create_new = self._beta1_power is None

        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = variable_scope.variable(self._beta1, name="beta1_power", trainable=False)
                self._beta2_power = variable_scope.variable(self._beta2, name="beta2_power", trainable=False)
        # Create slots for the first and second moments.
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
            self._zeros_slot(v, "vhat", self._name)
Developer: 255BITS, Project: hyperchamber-gan, Lines: 14, Source: amsgrad.py


Example 8: _create_non_slot_variable

  def _create_non_slot_variable(self, initial_value, name, colocate_with):
    """Add an extra variable, not associated with a slot."""
    in_graph_mode = context.in_graph_mode()
    if in_graph_mode:
      graph = colocate_with.graph
    else:
      graph = None

    key = (name, graph)
    v = self._non_slot_dict.get(key, None)
    if v is None:
      self._maybe_initialize_checkpointable()
      with ops.colocate_with(colocate_with):
        if not in_graph_mode:
          restored_initial_value = self._preload_simple_restoration(
              name=name, shape=None)
          if restored_initial_value is not None:
            initial_value = restored_initial_value
        v = variable_scope.variable(initial_value, name=name, trainable=False)
        # Restore this variable by name if necessary, but don't add a
        # Checkpointable dependency. Optimizers return the current graph's
        # non-slot variables from _checkpoint_dependencies explicitly rather
        # than unconditionally adding dependencies (since there may be multiple
        # non-slot variables with the same name in different graphs, trying to
        # save all of them would result in errors).
        self._handle_deferred_dependencies(name=name, checkpointable=v)
      self._non_slot_dict[key] = v

    return v
Developer: QiangCai, Project: tensorflow, Lines: 29, Source: optimizer.py


Example 9: _create_non_slot_variable

  def _create_non_slot_variable(self, initial_value, name, colocate_with):
    """Add an extra variable, not associated with a slot."""
    # Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables.
    eager = context.executing_eagerly()
    graph = None if eager else colocate_with.graph

    key = (name, graph)
    v = self._non_slot_dict.get(key, None)
    if v is None:
      self._maybe_initialize_trackable()
      distribution_strategy = distribute_ctx.get_strategy()
      with distribution_strategy.extended.colocate_vars_with(colocate_with):
        if eager:
          restored_initial_value = self._preload_simple_restoration(
              name=name, shape=None)
          if restored_initial_value is not None:
            initial_value = restored_initial_value
        v = variable_scope.variable(
            initial_value, name=name, trainable=False,
            use_resource=resource_variable_ops.is_resource_variable(
                colocate_with))
      # Restore this variable by name if necessary, but don't add a
      # Trackable dependency. Optimizers return the current graph's
      # non-slot variables from _checkpoint_dependencies explicitly rather
      # than unconditionally adding dependencies (since there may be multiple
      # non-slot variables with the same name in different graphs, trying to
      # save all of them would result in errors).
      self._handle_deferred_dependencies(name=name, trackable=v)
      self._non_slot_dict[key] = v

    return v
Developer: adit-chandra, Project: tensorflow, Lines: 31, Source: optimizer.py


Example 10: testNameScopeWithVariable

  def testNameScopeWithVariable(self):
    def in_cross_tower(_):
      c = variable_scope.variable(1.0, name="c")
      return c

    def model_fn():
      b = variable_scope.variable(1.0, name="b")
      with ops.name_scope("foo"):
        c = distribute_lib.get_tower_context().merge_call(in_cross_tower)
      return b, c

    dist = mirrored_strategy.MirroredStrategy(
        ["/device:GPU:0", "/device:CPU:0"])

    with context.graph_mode(), dist.scope():
      with ops.name_scope("main"):
        a = variable_scope.variable(1.0, name="a")
        result = dist.call_for_each_tower(model_fn, run_concurrently=False)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = dist.unwrap(a)
      b0, b1 = dist.unwrap(result_b)
      c0, c1 = dist.unwrap(result_c)
      self.assertEquals("main/a:0", a0.name)
      self.assertEquals("main/a/replica_1:0", a1.name)
      self.assertEquals("main/b:0", b0.name)
      self.assertEquals("main/b/replica_1:0", b1.name)
      self.assertEquals("main/foo/c:0", c0.name)
      self.assertEquals("main/foo/c/replica_1:0", c1.name)
Developer: Huoxubeiyin, Project: tensorflow, Lines: 31, Source: mirrored_strategy_multigpu_test.py


Example 11: model_fn

 def model_fn():
   v_sum = variable_scope.variable(
       1.0,
       synchronization=variable_scope.VariableSynchronization.ON_READ,
       aggregation=variable_scope.VariableAggregation.SUM)
   self.assertTrue(isinstance(v_sum, values.TowerLocalVariable))
   return v_sum
Developer: ChristinaEricka, Project: tensorflow, Lines: 7, Source: mirrored_strategy_multigpu_test.py


Example 12: __call__

  def __call__(self, getter, name, trainable, collections, *args, **kwargs):
    if trainable:
      with ops.device(self._worker_device):
        local_var = getter(
            name,
            trainable=True,
            collections=[ops.GraphKeys.LOCAL_VARIABLES],
            *args,
            **kwargs)

      global_variable = variable_scope.variable(
          name="%s/%s" % (GLOBAL_VARIABLE_NAME, name),
          initial_value=local_var.initialized_value(),
          trainable=False,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES])

      self._local_2_global[local_var] = global_variable
      return local_var
    else:
      kwargs['trainable'] = trainable
      kwargs['collections'] = collections
      if ops.GraphKeys.LOCAL_VARIABLES in collections:
        with ops.device(self._worker_device):
          return getter(name, *args, **kwargs)
      else:
        return getter(name, *args, **kwargs)
Developer: Ajaycs99, Project: tensorflow, Lines: 26, Source: model_average_optimizer.py


Example 13: create_metric_variable

 def create_metric_variable(self, initial_value, name):
   return variable_scope.variable(
       initial_value,
       trainable=False,
       collections=[ops_lib.GraphKeys.METRIC_VARIABLES],
       validate_shape=True,
       name=name)
Developer: AbhinavJain13, Project: tensorflow, Lines: 7, Source: replicate_model_fn_test.py


Example 14: _identity_metric_single

def _identity_metric_single(name, input_tensor):
  """A metric which takes on its last updated value.

  This keeps evaluation metrics in sync with one another, since update ops are
  run separately from their result Tensors. Simply returning (input_tensor,
  no_op) as a metric with a value but no update means that a metric will come
  from a different batch of data than metrics which cache values in a Variable
  (e.g. the default loss metric).

  Args:
    name: A name for the metric.
    input_tensor: Any Tensor.
  Returns:
    A tuple of (value, update_op).
  """
  metric_variable = variable_scope.variable(
      name="{}_identity_metric".format(name),
      initial_value=array_ops.zeros([], dtype=input_tensor.dtype),
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=False)
  update_op = state_ops.assign(
      metric_variable, input_tensor, validate_shape=False)
  # This shape will be correct once the first update runs (but may be
  # incomplete, so is not helpful for initializing the variable).
  metric_variable.set_shape(input_tensor.get_shape())
  return (metric_variable.value(), update_op)
Developer: Ajaycs99, Project: tensorflow, Lines: 26, Source: head.py
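
A hedged usage sketch: the returned (value, update_op) pair plugs directly into an Estimator-style eval_metric_ops dict, so the value tensor and its update op run at the same times as the other metrics (`loss_tensor` here is a hypothetical Tensor computed elsewhere in the graph):

value, update_op = _identity_metric_single("observed_loss", loss_tensor)
eval_metric_ops = {"observed_loss": (value, update_op)}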


Example 15: _create_factors

  def _create_factors(cls, rows, cols, num_shards, init, name):
    """Helper function to create row and column factors."""
    if callable(init):
      init = init()
    if isinstance(init, list):
      assert len(init) == num_shards
    elif isinstance(init, str) and init == "random":
      pass
    elif num_shards == 1:
      init = [init]
    sharded_matrix = []
    sizes = cls._shard_sizes(rows, num_shards)
    assert len(sizes) == num_shards

    def make_initializer(i, size):

      def initializer():
        if init == "random":
          return random_ops.random_normal([size, cols])
        else:
          return init[i]

      return initializer

    for i, size in enumerate(sizes):
      var_name = "%s_shard_%d" % (name, i)
      var_init = make_initializer(i, size)
      sharded_matrix.append(
          variable_scope.variable(
              var_init, dtype=dtypes.float32, name=var_name))

    return sharded_matrix
Developer: Joetz, Project: tensorflow, Lines: 32, Source: factorization_ops.py
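
cls._shard_sizes is referenced above but not shown. A plausible sketch, under the assumption that it splits `rows` into `num_shards` nearly equal shards (not necessarily the actual factorization_ops implementation):

def _shard_sizes(rows, num_shards):
    """Split `rows` into `num_shards` sizes that differ by at most one."""
    base, remainder = divmod(rows, num_shards)
    # The first `remainder` shards each take one extra row.
    return [base + 1] * remainder + [base] * (num_shards - remainder)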


Example 16: _transient_var

 def _transient_var(name):
   """Helper function to create a Variable."""
   return variable_scope.variable(
       1.0,
       trainable=False,
       collections=[ops.GraphKeys.LOCAL_VARIABLES],
       validate_shape=False,
       name=name)
Developer: Joetz, Project: tensorflow, Lines: 8, Source: factorization_ops.py


Example 17: run_fn

 def run_fn():
   tower_context = distribute.get_tower_context()
   self.assertTrue(tower_context is not None)
   self.assertIs(None, distribute.get_cross_tower_context())
   self.assertTrue(distribute.has_distribution_strategy())
   self.assertIs(dist, distribute.get_distribution_strategy())
   self.assertEqual("foo", tower_context.merge_call(None, test_arg="foo"))
   self.assertEqual("bar", variable_scope.variable(1.0, name="bar"))
Developer: Huoxubeiyin, Project: tensorflow, Lines: 8, Source: distribute_test.py


Example 18: _local_variable

def _local_variable(tensor, name=None):
  """Stores a tensor as a local Variable for faster read."""
  return variable_scope.variable(
      initial_value=tensor,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=False,
      name=name)
Developer: Jackiefan, Project: tensorflow, Lines: 8, Source: boosted_trees.py
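
A hedged usage sketch (`expensive_stats_tensor` is a hypothetical Tensor that is costly to recompute): storing a tensor in such a local variable lets later steps read the cached value rather than re-running the producing subgraph.

stats = _local_variable(expensive_stats_tensor, name="cached_stats")
# After tf.local_variables_initializer() runs, reads hit the stored value
# instead of re-executing the original computation.
cached = stats.read_value()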


Example 19: _create_weights

  def _create_weights(cls, wt_init, num_wts, num_shards, name):
    """Helper function to create sharded weight vector.

    Args:
      wt_init: init value for the weight. If None, weights are not created. This
        can be one of: None, a list of non-negative real numbers, or a single
        non-negative real number (or equivalent iterables).
      num_wts: total size of all the weight shards
      num_shards: number of shards for the weights
      name: name for the new Variables.

    Returns:
      A list of weight shard Tensors.

    Raises:
      ValueError: If wt_init is not the right format.
    """

    if wt_init is None:
      return None

    init_mode = "list"
    if isinstance(wt_init, collections.Iterable):
      if num_shards == 1 and len(wt_init) == num_wts:
        wt_init = [wt_init]
      assert len(wt_init) == num_shards
    elif isinstance(wt_init, numbers.Real) and wt_init >= 0:
      init_mode = "scalar"
    else:
      raise ValueError(
          "Invalid weight initialization argument. Must be one of these: "
          "None, a non-negative real number, or a list of lists of "
          "non-negative real numbers (or equivalent iterables) corresponding "
          "to sharded factors.")

    sizes = cls._shard_sizes(num_wts, num_shards)
    assert len(sizes) == num_shards

    with ops.name_scope(name):
      def make_wt_initializer(i, size):

        def initializer():
          if init_mode == "scalar":
            return wt_init * array_ops.ones([size])
          else:
            return wt_init[i]

        return initializer

      sharded_weight = []
      for i, size in enumerate(sizes):
        var_name = "%s_shard_%d" % (name, i)
        var_init = make_wt_initializer(i, size)
        sharded_weight.append(
            variable_scope.variable(
                var_init, dtype=dtypes.float32, name=var_name))

      return sharded_weight
Developer: jinxin0924, Project: tensorflow, Lines: 58, Source: factorization_ops.py


Example 20: test_creating_var_with_numpy_arrays

 def test_creating_var_with_numpy_arrays(self):
   with self.cached_session() as session:
     x = np.asarray(np.random.random((64, 3)), dtype=np.float32)
     initial = np.zeros_like(x)
     var_x = variable_scope.variable(initial)
     numpy_dataset.init_var_from_numpy(var_x, x, session)
     val = self.evaluate(var_x.value())
     # Verify that the numpy value is copied to the variable.
     self.assertAllEqual(x, val)
Developer: Wajih-O, Project: tensorflow, Lines: 9, Source: numpy_dataset_test.py
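
A hedged sketch of what init_var_from_numpy accomplishes (an assumption about its intent, not the actual numpy_dataset implementation): assigning through a placeholder so the array is fed at run time instead of being baked into the graph as a constant.

from tensorflow.python.ops import array_ops

# Reuses `var_x`, `x`, and `session` from the test body above.
placeholder = array_ops.placeholder(x.dtype, shape=x.shape)
assign_op = var_x.assign(placeholder)
session.run(assign_op, feed_dict={placeholder: x})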



Note: The tensorflow.python.ops.variable_scope.variable examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's License before distributing or using the code. Do not reproduce this article without permission.

