Python init_ops.zeros_initializer Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.python.ops.init_ops.zeros_initializer function in Python. If you have been wondering how zeros_initializer is used in practice, what it is for, or what real code that calls it looks like, the curated examples below should help.



The sections below present 20 code examples of the zeros_initializer function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
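
Before diving in, a quick orientation: in TensorFlow 1.x, init_ops.zeros_initializer() takes no arguments and returns an initializer object that fills a variable with zeros when it is created. Some older examples below (e.g. Examples 4, 5, and 14) use the pre-1.0 form zeros_initializer(shape, dtype=...), which returned the zero-filled initial value directly. A minimal, self-contained sketch of the modern form (graph mode; the variable name and shape are illustrative):

import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope

# Create a 2x3 variable whose initial value is all zeros.
bias = variable_scope.get_variable(
    "bias", shape=[2, 3], initializer=init_ops.zeros_initializer())

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(bias))  # [[0. 0. 0.]
                         #  [0. 0. 0.]]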

Example 1: __init__

 def __init__(self,
              axis=-1,
              momentum=0.99,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer=init_ops.zeros_initializer(),
              gamma_initializer=init_ops.ones_initializer(),
              moving_mean_initializer=init_ops.zeros_initializer(),
              moving_variance_initializer=init_ops.ones_initializer(),
              beta_regularizer=None,
              gamma_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
   super(BatchNormalization, self).__init__(
       name=name, trainable=trainable, **kwargs)
   self.axis = axis
   self.momentum = momentum
   self.epsilon = epsilon
   self.center = center
   self.scale = scale
   self.beta_initializer = beta_initializer
   self.gamma_initializer = gamma_initializer
   self.moving_mean_initializer = moving_mean_initializer
   self.moving_variance_initializer = moving_variance_initializer
   self.beta_regularizer = beta_regularizer
   self.gamma_regularizer = gamma_regularizer
Developer: AliMiraftab, Project: tensorflow, Lines: 28, Source: normalization.py


Example 2: testKernelStateList

  def testKernelStateList(self):
    """Test that transition kernel works with list input to `state`."""
    num_chains = 2
    loc_one = variable_scope.get_variable(
        "loc_one", [num_chains],
        initializer=init_ops.zeros_initializer())
    loc_two = variable_scope.get_variable(
        "loc_two", [num_chains], initializer=init_ops.zeros_initializer())

    def target_log_prob_fn(loc_one, loc_two):
      loc = array_ops.stack([loc_one, loc_two])
      log_prob = mvn_tril_lib.MultivariateNormalTriL(
          loc=constant_op.constant([0., 0.]),
          scale_tril=constant_op.constant([[0.1, 0.1], [0.0, 0.1]])).log_prob(
              loc)
      return math_ops.reduce_sum(log_prob, 0)

    def proposal_fn(loc_one, loc_two):
      loc_one_proposal = mh.proposal_normal(scale=0.05)
      loc_two_proposal = mh.proposal_normal(scale=0.05)
      loc_one_sample, _ = loc_one_proposal(loc_one)
      loc_two_sample, _ = loc_two_proposal(loc_two)
      return [loc_one_sample, loc_two_sample], None

    new_state, _ = mh.kernel(
        target_log_prob_fn=target_log_prob_fn,
        proposal_fn=proposal_fn,
        current_state=[loc_one, loc_two],
        seed=12415)
    loc_one_update = loc_one.assign(new_state[0])
    loc_two_update = loc_two.assign(new_state[1])

    init = variables.initialize_all_variables()
    with self.test_session() as sess:
      sess.run(init)
      loc_one_samples = []
      loc_two_samples = []
      for _ in range(10000):
        loc_one_sample, loc_two_sample = sess.run(
            [loc_one_update, loc_two_update])
        loc_one_samples.append(loc_one_sample)
        loc_two_samples.append(loc_two_sample)

    loc_one_samples = np.array(loc_one_samples)
    loc_two_samples = np.array(loc_two_samples)
    loc_one_samples = loc_one_samples[1000:]  # drop samples for burn-in
    loc_two_samples = loc_two_samples[1000:]  # drop samples for burn-in

    self.assertAllClose(np.mean(loc_one_samples, 0),
                        np.array([0.] * num_chains),
                        rtol=1e-5, atol=1e-1)
    self.assertAllClose(np.mean(loc_two_samples, 0),
                        np.array([0.] * num_chains),
                        rtol=1e-5, atol=1e-1)
    self.assertAllClose(np.std(loc_one_samples, 0),
                        np.array([0.1] * num_chains),
                        rtol=1e-5, atol=1e-1)
    self.assertAllClose(np.std(loc_two_samples, 0),
                        np.array([0.1] * num_chains),
                        rtol=1e-5, atol=1e-1)
Developer: AndrewTwinz, Project: tensorflow, Lines: 60, Source: metropolis_hastings_test.py


Example 3: testAddWeight

  def testAddWeight(self):
    layer = base_layers.Layer(name='my_layer')

    # Test basic variable creation.
    variable = layer.add_variable(
        'my_var', [2, 2], initializer=init_ops.zeros_initializer())
    self.assertEqual(variable.name, 'my_layer/my_var:0')
    self.assertListEqual(layer.variables, [variable])
    self.assertListEqual(layer.trainable_variables, [variable])
    self.assertListEqual(layer.non_trainable_variables, [])
    self.assertListEqual(layer.variables,
                         ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

    # Test non-trainable variable creation.
    # layer.add_variable should work even outside `build` and `call`.
    variable_2 = layer.add_variable(
        'non_trainable_var', [2, 2],
        initializer=init_ops.zeros_initializer(),
        trainable=False)
    self.assertListEqual(layer.variables, [variable, variable_2])
    self.assertListEqual(layer.trainable_variables, [variable])
    self.assertListEqual(layer.non_trainable_variables, [variable_2])
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)

    if context.in_graph_mode():
      # regularizers only supported in GRAPH mode.
      regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
      variable = layer.add_variable(
          'reg_var', [2, 2],
          initializer=init_ops.zeros_initializer(),
          regularizer=regularizer)
      self.assertEqual(len(layer.losses), 1)
Developer: keveman, Project: tensorflow, Lines: 33, Source: base_test.py


Example 4: _auc_hist_accumulate

def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Developer: BloodD, Project: tensorflow, Lines: 27, Source: histogram_ops.py


Example 5: weighted_moving_average

def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.

  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value.  The moving average decay.
    weight:  `Tensor` that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv:  Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division.  If `False`, use division implied by dtypes.
    collections:  List of graph collections keys to add the internal variables
      `value * weight` and `weight` to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    name: Optional name of the returned operation.
      Defaults to "WeightedMovingAvg".

  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates.  Thus, the signature of this function is
  # quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  with variable_scope.variable_scope(name, "WeightedMovingAvg",
                                     [value, weight, decay]) as scope:
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        initializer=init_ops.zeros_initializer(value.get_shape(),
                                               dtype=value.dtype),
        trainable=False,
        collections=collections)
    weight_var = variable_scope.get_variable(
        "weight",
        initializer=init_ops.zeros_initializer(weight.get_shape(),
                                               dtype=weight.dtype),
        trainable=False,
        collections=collections)
    numerator = assign_moving_average(
        value_x_weight_var, value * weight, decay, zero_debias=False)
    denominator = assign_moving_average(
        weight_var, weight, decay, zero_debias=False)

    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
Developer: allesover, Project: tensorflow, Lines: 60, Source: moving_averages.py
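
To make the update rule in the docstring above concrete, here is a small plain-Python sketch (with made-up values) of the two internal accumulators and their ratio:

# Each accumulator follows: new_value = decay * old_value + (1 - decay) * update
decay = 0.9
value_x_weight_acc = 0.0  # moving average of value * weight
weight_acc = 0.0          # moving average of weight
for value, weight in [(2.0, 1.0), (4.0, 0.5), (3.0, 1.0)]:
  value_x_weight_acc = decay * value_x_weight_acc + (1 - decay) * value * weight
  weight_acc = decay * weight_acc + (1 - decay) * weight
  # The weighted moving average is the ratio of the two accumulators.
  print(value_x_weight_acc / weight_acc)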


Example 6: linear_module

 def linear_module(x, output_size):
   w = variable_scope.get_variable(
       "w", shape=[x.get_shape()[1], output_size],
       initializer=init_ops.zeros_initializer())
   b = variable_scope.get_variable(
       "b", shape=[output_size],
       initializer=init_ops.zeros_initializer())
   return (math_ops.matmul(x, w) + b), w
Developer: JonathanRaiman, Project: tensorflow, Lines: 8, Source: template_test.py


Example 7: _templated

 def _templated():
   v = variable_scope.get_variable(
       "v", shape=[1], initializer=init_ops.zeros_initializer(),
       use_resource=True)
   v2 = variable_scope.get_variable(
       "v2", shape=[1], initializer=init_ops.zeros_initializer(),
       use_resource=True)
   return v, v + 1., v2
Developer: jackd, Project: tensorflow, Lines: 8, Source: checkpointable_utils_test.py


Example 8: __init__

  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, list):
      self.axis = axis[:]
    else:
      self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer
    self.beta_constraint = beta_constraint
    self.gamma_constraint = gamma_constraint
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    if fused is None:
      fused = True

    self.fused = fused
    self._bessels_correction_test_only = True

    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
Developer: dansbecker, Project: tensorflow, Lines: 58, Source: normalization.py


Example 9: testAddWeight

  def testAddWeight(self):
    layer = base_layers.Layer(name='my_layer')

    # Test basic variable creation.
    variable = layer.add_variable(
        'my_var', [2, 2], initializer=init_ops.zeros_initializer())
    self.assertEqual(variable.name, 'my_layer/my_var:0')
    self.assertEqual(layer.variables, [variable])
    self.assertEqual(layer.trainable_variables, [variable])
    self.assertEqual(layer.non_trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          layer.variables,
          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

    # Test non-trainable variable creation.
    # layer.add_variable should work even outside `build` and `call`.
    variable_2 = layer.add_variable(
        'non_trainable_var', [2, 2],
        initializer=init_ops.zeros_initializer(),
        trainable=False)
    self.assertEqual(layer.variables, [variable, variable_2])
    self.assertEqual(layer.trainable_variables, [variable])
    self.assertEqual(layer.non_trainable_variables, [variable_2])

    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)

    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    _ = layer.add_variable(
        'reg_var', [2, 2],
        initializer=init_ops.zeros_initializer(),
        regularizer=regularizer)
    self.assertEqual(len(layer.losses), 1)

    added_variable = [False]

    # Test that sync `ON_READ` variables are defaulted to be non-trainable.
    variable_3 = layer.add_variable(
        'sync_on_read_var', [2, 2],
        initializer=init_ops.zeros_initializer(),
        synchronization=variable_scope.VariableSynchronization.ON_READ,
        aggregation=variable_scope.VariableAggregation.SUM)
    self.assertEqual(layer.non_trainable_variables, [variable_2, variable_3])

    @def_function.function
    def function_adds_weight():
      if not added_variable[0]:
        layer.add_variable(
            'reg_var_from_function', [2, 2],
            initializer=init_ops.zeros_initializer(),
            regularizer=regularizer)
        added_variable[0] = True

    function_adds_weight()
    self.assertEqual(len(layer.losses), 2)
Developer: adit-chandra, Project: tensorflow, Lines: 57, Source: base_test.py


Example 10: testLSTMLayer

  def testLSTMLayer(self):
    # Run with all-0 weights, no padding.
    o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
    self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
    o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
    self.assertAllClose(o, [[[.25]] * self._batch_size,
                            [[.125]] * self._batch_size,
                            [[.0625]] * self._batch_size])
    o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
    self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
    o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
    self.assertAllClose(o, [[[.25]] * self._batch_size,
                            [[.125]] * self._batch_size,
                            [[.0625]] * self._batch_size])

    # Run with all-1 weights, no padding.
    weight1 = 1.
    for m_init in [0., 1.]:
      for c_init in [0., 1.]:
        o = self._RunLSTMLayer('ones',
                               init_ops.ones_initializer(), m_init, c_init, 0.)
        m0 = self._NextM(self._inputs, weight1, m_init, c_init)
        c0 = self._NextC(self._inputs, weight1, m_init, c_init)
        self.assertAllClose(o[0], m0)
        m1 = self._NextM(self._inputs, weight1, m0, c0)
        c1 = self._NextC(self._inputs, weight1, m0, c0)
        self.assertAllClose(o[1], m1)
        m2 = self._NextM(self._inputs, weight1, m1, c1)
        self.assertAllClose(o[2], m2)

    # Run with random weights.
    for weight in np.random.rand(3):
      weight_tf = constant_op.constant(weight, dtypes.float32)
      random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)

      # No padding.
      for m_init in [0., 1.]:
        for c_init in [0., 1.]:
          o = self._RunLSTMLayer('random', random_weight, m_init, c_init, 0.)
          m0 = self._NextM(self._inputs, weight, m_init, c_init)
          c0 = self._NextC(self._inputs, weight, m_init, c_init)
          self.assertAllClose(o[0], m0)
          m1 = self._NextM(self._inputs, weight, m0, c0)
          c1 = self._NextC(self._inputs, weight, m0, c0)
          self.assertAllClose(o[1], m1)
          m2 = self._NextM(self._inputs, weight, m1, c1)
          self.assertAllClose(o[2], m2)

      # Set padding.
      o = self._RunLSTMLayer('random', random_weight, 0., 0., 1.)
      self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
      o = self._RunLSTMLayer('random', random_weight, 0., 1., 1.)
      self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
      o = self._RunLSTMLayer('random', random_weight, 1., 0., 1.)
      self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
      o = self._RunLSTMLayer('random', random_weight, 1., 1., 1.)
      self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 57, Source: lstm_test.py


Example 11: __init__

  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               name=None,
               **kwargs):
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer
    self.beta_constraint = beta_constraint
    self.gamma_constraint = gamma_constraint
    self.renorm = renorm
    # This environment variable is only used during the testing period of fused
    # batch norm and will be removed after that.
    if fused is None:
      fused = _FUSED_DEFAULT

    self.fused = fused
    self._bessels_correction_test_only = True
    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
Developer: piyushjaiswal98, Project: tensorflow, Lines: 52, Source: normalization.py


Example 12: __init__

 def __init__(self,
              axis=-1,
              momentum=0.99,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer=init_ops.zeros_initializer(),
              gamma_initializer=init_ops.ones_initializer(),
              moving_mean_initializer=init_ops.zeros_initializer(),
              moving_variance_initializer=init_ops.ones_initializer(),
              beta_regularizer=None,
              gamma_regularizer=None,
              renorm=False,
              renorm_clipping=None,
              renorm_momentum=0.99,
              fused=False,
              trainable=True,
              name=None,
              **kwargs):
   super(BatchNormalization, self).__init__(
       name=name, trainable=trainable, **kwargs)
   self.axis = axis
   self.momentum = momentum
   self.epsilon = epsilon
   self.center = center
   self.scale = scale
   self.beta_initializer = beta_initializer
   self.gamma_initializer = gamma_initializer
   self.moving_mean_initializer = moving_mean_initializer
   self.moving_variance_initializer = moving_variance_initializer
   self.beta_regularizer = beta_regularizer
   self.gamma_regularizer = gamma_regularizer
   self.renorm = renorm
   self.fused = fused
   if self.fused and renorm:
     raise ValueError(
         'Batch renorm is currently not supported with fused batch norm.')
   if self.fused and (beta_regularizer is not None or
                      gamma_regularizer is not None):
     raise ValueError('Regularizers are not currently '
                      'supported for fused batch norm.')
   if renorm:
     renorm_clipping = renorm_clipping or {}
     keys = ['rmax', 'rmin', 'dmax']
     if set(renorm_clipping) - set(keys):
       raise ValueError('renorm_clipping %s contains keys not in %s' %
                        (renorm_clipping, keys))
     self.renorm_clipping = renorm_clipping
     self.renorm_momentum = renorm_momentum
Developer: AutumnQYN, Project: tensorflow, Lines: 49, Source: normalization.py


Example 13: create_variables_and_ops

  def create_variables_and_ops(self, table, variable_name, num_hosts,
                               table_config, table_variables,
                               load_parameters_ops, retrieve_parameters_ops):
    optimizer_name = 'Adam'
    m_initializer = init_ops.zeros_initializer()
    m_variables = _create_partitioned_variables(
        name='%s/%s/m' % (variable_name, optimizer_name),
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=m_initializer)
    v_initializer = init_ops.zeros_initializer()
    v_variables = _create_partitioned_variables(
        name='%s/%s/v' % (variable_name, optimizer_name),
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=v_initializer)

    self._table_to_m_variables_dict[table] = m_variables
    self._table_to_v_variables_dict[table] = v_variables

    for host_id, table_variable, m_variable, v_variable in (zip(
        range(num_hosts), table_variables,
        m_variables, v_variables)):
      with ops.colocate_with(table_variable):
        load_parameters_op = (
            tpu_ops.load_tpu_embedding_adam_parameters(
                parameters=table_variable,
                momenta=m_variable,
                velocities=v_variable,
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        retrieved_table, retrieved_m, retrieved_v = (
            tpu_ops.retrieve_tpu_embedding_adam_parameters(
                table_name=table,
                num_shards=num_hosts,
                shard_id=host_id))
        retrieve_parameters_op = control_flow_ops.group(
            state_ops.assign(table_variable, retrieved_table),
            state_ops.assign(m_variable, retrieved_m),
            state_ops.assign(v_variable, retrieved_v))

      load_parameters_ops.append(load_parameters_op)
      retrieve_parameters_ops.append(retrieve_parameters_op)
Developer: Wajih-O, Project: tensorflow, Lines: 48, Source: tpu_embedding.py


Example 14: __init__

    def __init__(self, value, decay,
                 truediv=True,
                 collections=None,
                 reduction_indices=None,
                 name=None):
        self.value = value
        self.reduction_indices = reduction_indices or [0]

        eps = 1e-8
        if truediv:
            div = math_ops.truediv
        else:
            div = math_ops.div
        if collections is None:
            collections = [ops.GraphKeys.VARIABLES]

        value_shape = value.get_shape().as_list()
        shape = []
        for dim in range(len(value_shape)):
            if dim in self.reduction_indices:
                shape.append(1)
            else:
                shape.append(value_shape[dim])

        with variable_scope.variable_op_scope(
                [value, decay], name, "MomentTracker") as scope:

            mean_x_weight_var = variable_scope.get_variable("mean_x_weight", trainable=False, collections=collections,
                initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))

            variance_x_weight_var = variable_scope.get_variable("variance_x_weight", trainable=False,
                collections=collections, initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))

            weight_var = variable_scope.get_variable("weight", trainable=False, collections=collections,
                initializer=init_ops.zeros_initializer([1], dtype=tf.float32))

            self.tracked_mean = div(mean_x_weight_var, weight_var + eps)
            self.tracked_variance = div(variance_x_weight_var, weight_var + eps)

            self.batch_mean, self.batch_variance = tf.nn.moments(self.value, axes=self.reduction_indices,
                                                                 shift=self.tracked_mean, keep_dims=True)

            mean_numerator = assign_moving_average(mean_x_weight_var, self.batch_mean, decay)
            variance_numerator = assign_moving_average(variance_x_weight_var, self.batch_variance, decay)
            denominator = assign_moving_average(weight_var, 1.0, decay)

            self.update_mean = div(mean_numerator, denominator + eps, name=scope.name)
            self.update_variance = div(variance_numerator, denominator + eps, name=scope.name)
Developer: NoahDStein, Project: NeuralNetSandbox, Lines: 48, Source: moment_tracker.py


Example 15: create_global_step

def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections)
Developer: AliMiraftab, Project: tensorflow, Lines: 26, Source: variables.py
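
A usage sketch for the helper above, written against the public tf.train wrapper that mirrors it (TF 1.x; the loss is illustrative): the global step is created once per graph, and the optimizer increments it on every training update.

import tensorflow as tf

w = tf.get_variable("w", shape=[3])
loss = tf.reduce_sum(tf.square(w))
global_step = tf.train.create_global_step()  # scalar int64, zero-initialized
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=global_step)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(global_step))  # 1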


Example 16: variable_scoped_function_no_return_value

 def variable_scoped_function_no_return_value(trainable=True):
   # defun cannot compile functions that return non-Tensor objects
   _ = variable_scope.get_variable(
       "dummy",
       shape=[1],
       trainable=trainable,
       initializer=init_ops.zeros_initializer())
Developer: JonathanRaiman, Project: tensorflow, Lines: 7, Source: template_test.py


Example 17: model_fn

 def model_fn(features, labels, mode):
   _ = labels
   step = training.get_global_step()
   w = variable_scope.get_variable(
       'w',
       shape=[],
       initializer=init_ops.zeros_initializer(),
       dtype=dtypes.int64)
   if estimator_lib.ModeKeys.TRAIN == mode:
     # to consume features, we have control dependency
     with ops.control_dependencies([features]):
       step_inc = state_ops.assign_add(training.get_global_step(), 1)
     with ops.control_dependencies([step_inc]):
       assign_w_to_step_plus_2 = w.assign(step + 2)
     return estimator_lib.EstimatorSpec(
         mode,
         loss=constant_op.constant(3.),
         train_op=assign_w_to_step_plus_2)
   if estimator_lib.ModeKeys.EVAL == mode:
     # to consume features, we have control dependency
     with ops.control_dependencies([features]):
       loss = constant_op.constant(5.)
     return estimator_lib.EstimatorSpec(
         mode,
         loss=loss,
         # w is constant in each step, so the mean.
         # w = 0 if step==0 else step+2
         eval_metric_ops={'mean_of_const': metrics_lib.mean(w)})
Developer: ChristinaEricka, Project: tensorflow, Lines: 28, Source: hooks_test.py


Example 18: apply_gradients

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    gradients = []
    # Number of stale gradients.
    stale_counter = variable_scope.get_variable(
        "stale_counter", [],
        initializer=init_ops.zeros_initializer(),
        trainable=False)

    def _AcceptGradientOp():
      with ops.control_dependencies(
          [self._opt.apply_gradients(
              grads_and_vars, global_step=global_step, name=name)]):
        return gen_array_ops.identity(0.0)

    def _DropGradientOp():
      return gen_array_ops.identity(1.0)

    for grad_and_var in grads_and_vars:
      grad = grad_and_var[0]
      if isinstance(grad, ops.Tensor):
        gradients.append(grad)
      else:
        gradients.append(grad.op)

    with ops.control_dependencies(gradients), ops.colocate_with(global_step):
      staleness = gen_array_ops.reshape(
          global_step - self._local_step, shape=())
      conditional_update = stale_counter.assign_add(control_flow_ops.cond(
          gen_math_ops.less_equal(staleness, self._staleness),
          _AcceptGradientOp, _DropGradientOp))

    summary.scalar(
        "Gradient staleness percentage",
        stale_counter / (math_ops.cast(global_step + 1, dtypes.float32)))
    return conditional_update
Developer: sandeepgupta2k4, Project: tensorflow, Lines: 35, Source: drop_stale_gradient_optimizer.py


Example 19: testInitialValueComesFromCheckpoint

  def testInitialValueComesFromCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)

    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with variable_scope.variable_scope(
            "some_scope", initializer=init_ops.zeros_initializer()):
          my1 = variable_scope.get_variable("my1", [1, 10])

        # At this point, my1.initialized_value() will add ops that reference
        # the zeros initializer of my1.
        before = variables.Variable(my1.initialized_value(), name="before")

        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})

        # At this point, my1.initialized_value() will add ops that reference
        # the newly set initializer of my1.
        after = variables.Variable(my1.initialized_value(), name="after")

        session.run(variables.global_variables_initializer())
        self.assertAllEqual(session.run(my1), v1)
        self.assertAllEqual(session.run(my1.initialized_value()), v1)
        self.assertAllClose(session.run(before), [[0.0] * 10])
        self.assertAllClose(session.run(after), v1)
        with self.assertRaises(AssertionError):
          self.assertAllClose(session.run(before), session.run(after))
Developer: QiangCai, Project: tensorflow, Lines: 29, Source: checkpoint_utils_test.py


Example 20: testInitialValueComesFromCheckpoint

  def testInitialValueComesFromCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)

    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with variable_scope.variable_scope(
            "some_scope", initializer=init_ops.zeros_initializer()):
          my1 = variable_scope.get_variable("my1", [1, 10])

        before = my1.initialized_value()

        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})

        after = my1.initialized_value()

        self.assertAllEqual(session.run(before), [[0.0] * 10])
        self.assertAllEqual(session.run(after), v1)

        session.run(variables.global_variables_initializer())

        self.assertAllEqual(session.run(my1), v1)
        self.assertAllEqual(session.run(my1.initialized_value()), v1)
        self.assertAllClose(session.run(before), v1)
        self.assertAllClose(session.run(after), v1)
        with self.assertRaises(AssertionError):
          self.assertAllClose(v1, [[0.0] * 10])
Developer: AndrewTwinz, Project: tensorflow, Lines: 29, Source: checkpoint_utils_test.py



Note: the tensorflow.python.ops.init_ops.zeros_initializer examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use are subject to each project's License. Do not reproduce without permission.

