
Python tensorflow.assign Function Code Examples


This article collects typical usage examples of the Python function tensorflow.assign. If you have been wondering what exactly assign does, how to call it, or what real code that uses it looks like, the curated examples below should help.



Twenty code examples of the assign function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code samples.
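Before the examples, here is a minimal sketch of the basic pattern (TF 1.x API, to match the examples below; the variable names are illustrative only): tf.assign(ref, value) returns an op that, when run, writes value into the variable ref and yields the new value.

import tensorflow as tf

counter = tf.Variable(0, name="counter")   # the variable to mutate
set_to_ten = tf.assign(counter, 10)        # op: counter <- 10

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(counter))   # 0
    sess.run(set_to_ten)       # executes the assignment
    print(sess.run(counter))   # 10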

Example 1: test_capture

  def test_capture(self):
    global_step = tf.contrib.framework.get_or_create_global_step()
    # Some test computation
    some_weights = tf.get_variable("weights", [2, 128])
    computation = tf.nn.softmax(some_weights)

    hook = hooks.MetadataCaptureHook(
        params={"step": 5}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())
    hook.begin()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      #pylint: disable=W0212
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # Should not trigger for step 0
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      # Should trigger *after* step 5
      sess.run(tf.assign(global_step, 5))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      mon_sess.run(computation)
      self.assertEqual(
          set(gfile.ListDirectory(self.model_dir)),
          set(["run_meta", "tfprof_log", "timeline.json"]))
Author: AbhinavJain13, Project: seq2seq, Lines: 27, Source: hooks_test.py


Example 2: _apply

  def _apply(self, grad, var, indices=None):
    lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)

    # m_t = beta1 * m + (1 - beta1) * g_t
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
      m_t = self._assign_add(m, updates=m_scaled_g_values, indices=indices)
    m_gathered = self._gather(m_t, indices=indices)

    # Also see tf.nn.moments.
    variance = tf.squared_difference(grad, m_gathered)

    # v_t = beta2 * v + (1 - beta2) * variance
    v_scaled_new_values = variance * (1 - beta2_t)
    v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
      v_t = self._assign_add(v, updates=v_scaled_new_values, indices=indices)
    v_gathered = self._gather(v_t, indices=indices)

    factor = v_gathered / (variance + epsilon_t)
    update = lr * grad * tf.minimum(factor, 1.0)
    var_update = self._assign_sub(ref=var, updates=update, indices=indices)
    return tf.group(*[var_update, m_t])
Author: rwth-i6, Project: returnn, Lines: 29, Source: TFUpdater.py


Example 3: testReuseVars

 def testReuseVars(self):
   height, width = 3, 3
   with self.test_session() as sess:
     image_shape = (10, height, width, 3)
     image_values = np.random.rand(*image_shape)
     expected_mean = np.mean(image_values, axis=(0, 1, 2))
     expected_var = np.var(image_values, axis=(0, 1, 2))
     images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
     output = ops.batch_norm(images, decay=0.1, is_training=False)
     update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
     with tf.control_dependencies(update_ops):
       barrier = tf.no_op(name='gradient_barrier')
       output = control_flow_ops.with_dependencies([barrier], output)
     # Initialize all variables
     sess.run(tf.global_variables_initializer())
     moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
     moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
     mean, variance = sess.run([moving_mean, moving_variance])
     # After initialization moving_mean == 0 and moving_variance == 1.
     self.assertAllClose(mean, [0] * 3)
     self.assertAllClose(variance, [1] * 3)
      # Simulate assignment from saver restore.
     init_assigns = [tf.assign(moving_mean, expected_mean),
                     tf.assign(moving_variance, expected_var)]
     sess.run(init_assigns)
     for _ in range(10):
       sess.run([output], {images: np.random.rand(*image_shape)})
     mean = moving_mean.eval()
     variance = moving_variance.eval()
     # Although we feed different images, the moving_mean and moving_variance
     # shouldn't change.
     self.assertAllClose(mean, expected_mean)
     self.assertAllClose(variance, expected_var)
Author: Aravindreddy986, Project: TensorFlowOnSpark, Lines: 33, Source: ops_test.py


Example 4: mean_var_with_update

    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        pop_mean_op = tf.assign(pop_mean, ema.average(batch_mean))
        pop_var_op = tf.assign(pop_var, ema.average(batch_var))

        with tf.control_dependencies([ema_apply_op, pop_mean_op, pop_var_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)
Author: deworrall92, Project: groupConvolutions, Lines: 7, Source: BSD_model.py
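For context, here is a hedged sketch of how a closure like mean_var_with_update is typically wired into a batch-norm layer; everything below except mean_var_with_update itself is an assumption, not code from the source project.

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 64])
is_training = tf.placeholder(tf.bool, [])
beta, gamma = tf.Variable(tf.zeros([64])), tf.Variable(tf.ones([64]))
pop_mean = tf.Variable(tf.zeros([64]), trainable=False)
pop_var = tf.Variable(tf.ones([64]), trainable=False)

ema = tf.train.ExponentialMovingAverage(decay=0.99)
batch_mean, batch_var = tf.nn.moments(inputs, axes=[0])

# mean_var_with_update (Example 4) closes over the names defined above.
mean, var = tf.cond(is_training,
                    mean_var_with_update,          # training: update the EMAs
                    lambda: (pop_mean, pop_var))   # inference: population stats
out = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-5)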


Example 5: batch_norm

def batch_norm(inputs, name_scope, is_training, epsilon=1e-3, decay=0.99):
    with tf.variable_scope(name_scope):
        size = inputs.get_shape().as_list()[1]

        gamma = tf.get_variable(
            'gamma', [size], initializer=tf.constant_initializer(0.1))
        # beta = tf.get_variable('beta', [size], initializer=tf.constant_initializer(0))
        beta = tf.get_variable('beta', [size])

        pop_mean = tf.get_variable('pop_mean', [size],
                                   initializer=tf.zeros_initializer(), trainable=False)
        pop_var = tf.get_variable('pop_var', [size],
                                  initializer=tf.ones_initializer(), trainable=False)
        batch_mean, batch_var = tf.nn.moments(inputs, [0])

        train_mean_op = tf.assign(
            pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var_op = tf.assign(
            pop_var, pop_var * decay + batch_var * (1 - decay))

        def batch_statistics():
            with tf.control_dependencies([train_mean_op, train_var_op]):
                return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, gamma, epsilon)

        def pop_statistics():
            return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, gamma, epsilon)

        # control flow
        return tf.cond(is_training, batch_statistics, pop_statistics)
Author: seasky100, Project: tensorflow_end2end_speech_recognition, Lines: 29, Source: batch_normalization.py
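A hedged usage sketch for the batch_norm defined above; the shapes and names are illustrative, not from the source project.

import tensorflow as tf   # assumes batch_norm from Example 5 is in scope

inputs = tf.placeholder(tf.float32, [None, 128])
is_training = tf.placeholder(tf.bool, [])

normed = batch_norm(inputs, "bn1", is_training)

# Feed is_training=True during training so the tf.assign ops update the
# population statistics; feed False at inference to use those statistics.
# sess.run(normed, {inputs: batch, is_training: True})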


Example 6: train_spectrogram_encoder

def train_spectrogram_encoder():
  tf.initialize_all_variables().run()
  print("Pretrain")
  for i in range(6000-1):
    batch_xs, batch_ys = speech.train.next_batch(100)
    # WTF, tensorflow can't do 3D tensor operations?
    # https://github.com/tensorflow/tensorflow/issues/406 =>
    batch_xs=[flatten(matrix) for matrix in batch_xs]
    #  you have to reshape to flat/matrix data? why didn't they call it matrixflow?
    feed = {x: batch_xs, y_: batch_ys}
    speech_step.run(feed) # better for encod_entropy too! (later)
    if(i%100==0):
      print("iteration %d"%i)#, end=' ')
      eval(feed)
    if((i+1)%7000==0):
      print("l_rate*=0.1")
      sess.run(tf.assign(l_rate,l_rate*0.1))

  print("Train")
  for i in range(100000):
    batch_xs, batch_ys = speech.train.next_batch(100)
    feed = {x: batch_xs, y_: batch_ys}
    if((i+1)%9000==0):sess.run(tf.assign(l_rate,l_rate*0.3))
    encod_step.run(feed) # alternating!
    speech_step.run(feed)
    train_step.run(feed)
    if(i%100==0):
      print("iteration %d"%i)#, end=' ')
      eval(feed)
Author: duydb2, Project: tensorflow-speech-recognition, Lines: 29, Source: generate_sound.py


Example 7: expectation_maximization_step

    def expectation_maximization_step(self, x):
        
        # probability of emission sequence
        obs_prob_seq = tf.gather(self.E, x)

        with tf.name_scope('Forward_Backward'):
            self.forward_backward(obs_prob_seq)

        with tf.name_scope('Re_estimate_transition'):
            new_T0, new_transition = self.re_estimate_transition(x)
        
        with tf.name_scope('Re_estimate_emission'):
            new_emission = self.re_estimate_emission(x)

        with tf.name_scope('Check_Convergence'):
            converged = self.check_convergence(new_T0, new_transition, new_emission)

        with tf.name_scope('Update_parameters'):
            self.T0 = tf.assign(self.T0, new_T0)
            self.E = tf.assign(self.E, new_emission)
            self.T = tf.assign(self.T, new_transition)
            #self.count = tf.assign_add(self.count, 1)
             
            with tf.name_scope('histogram_summary'):
                _ = tf.histogram_summary(self.T0.name, self.T0)
                _ = tf.histogram_summary(self.T.name, self.T)
                _ = tf.histogram_summary(self.E.name, self.E)
        return converged
Author: aliziaei, Project: HiddenMarkovModel_TensorFlow, Lines: 28, Source: HiddenMarkovModel.py


Example 8: _cached_copy

  def _cached_copy(self, var, name):
    """Helper function to create a worker cached copy of a Variable.

    Args:
      var: Variable or list of Variable to cache. If a list, the items are
        concatenated along dimension 0 to get the cached entry.
      name: name of cached variable.

    Returns:
      Tuple consisting of following three entries:
      cache: the new transient Variable.
      cache_init: op to initialize the Variable
      cache_reset: op to reset the Variable to some default value
    """
    if var is None:
      return None, None, None
    else:
      cache = WALSModel._transient_var(name)
      with ops.colocate_with(cache):
        if isinstance(var, list):
          assert var
          if len(var) == 1:
            var = var[0]
          else:
            var = tf.concat(0, var)

      cache_init = tf.assign(cache, var, validate_shape=False)
      cache_reset = tf.assign(cache, 1.0, validate_shape=False)
      return cache, cache_init, cache_reset
Author: AntHar, Project: tensorflow, Lines: 29, Source: factorization_ops.py


Example 9: overflow_case

 def overflow_case():
   new_scale_val = tf.clip_by_value(self.scale / self.step_factor,
                                    self.scale_min, self.scale_max)
   scale_assign = tf.assign(self.scale, new_scale_val)
   overflow_iter_assign = tf.assign(self.last_overflow_iteration, self.iteration)
   with tf.control_dependencies([scale_assign, overflow_iter_assign]):
     return tf.identity(self.scale)
Author: fotwo, Project: OpenSeq2Seq, Lines: 7, Source: automatic_loss_scaler.py
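For orientation, a hedged sketch (not the OpenSeq2Seq code) of how overflow_case is typically used; grads_ok and growth_case are assumed names.

# Hypothetical wiring: overflow_case pairs with a growth branch in a tf.cond
# keyed on whether all gradients of the current step were finite.
new_scale = tf.cond(grads_ok,
                    growth_case,    # gradients finite: possibly grow the scale
                    overflow_case)  # overflow: shrink the scale, record iteration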


Example 10: running_mean

def running_mean(cost, tag_name, batch_size=1):
    with tf.name_scope("running_mean_" + tag_name):
        with tf.variable_scope(tag_name):
            cost_sum = tf.get_variable(
              "cost_sum",
              initializer=tf.zeros_initializer,
              dtype=tf.float64,
              shape=(),
              collections=[tf.GraphKeys.LOCAL_VARIABLES],
              trainable=False)
            batches = tf.get_variable(
              "cost_num_batches",
              initializer=tf.zeros_initializer,
              dtype=tf.int32,
              shape=(),
              collections=[tf.GraphKeys.LOCAL_VARIABLES],
              trainable=False)

        cost_add = tf.assign_add(cost_sum, tf.cast(cost, dtype=tf.float64))
        batches_add = tf.assign_add(batches, batch_size)
        update_cost_mean = tf.group(cost_add, batches_add)

        reset_batches = tf.assign(batches, 0)
        reset_cost_sum = tf.assign(cost_sum, 0.0)
        reset_cost_mean = tf.group(reset_batches, reset_cost_sum)

        mean_cost = tf.divide(
          cost_sum,
          tf.cast(batches, dtype=tf.float64))
        train_loss_summary = tf.summary.scalar(tag_name, mean_cost)

    return reset_cost_mean, update_cost_mean, train_loss_summary
Author: cupslab, Project: neural_network_cracking, Lines: 32, Source: pass_utils.py
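A hedged usage sketch for running_mean (the cost placeholder and training loop are illustrative): run update_cost_mean after every batch, fetch the summary at logging points, then reset the window.

import numpy as np
import tensorflow as tf   # assumes running_mean from Example 10 is in scope

cost = tf.placeholder(tf.float64, [])
reset_op, update_op, summary_op = running_mean(cost, "train_loss", batch_size=1)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())   # the counters are LOCAL_VARIABLES
    for step in range(300):
        sess.run(update_op, {cost: np.random.rand()})   # accumulate this batch
        if (step + 1) % 100 == 0:
            print(sess.run(summary_op))   # serialized summary of the window mean
            sess.run(reset_op)            # start a fresh averaging window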


Example 11: run_tf_simulation

    def run_tf_simulation(self, c_in, h_in, timesteps=100, dt=0.005):
        r_e = tf.Variable( tf.zeros([self.N_pairs, self.N_pairs]) )
        r_i = tf.Variable( tf.zeros([self.N_pairs, self.N_pairs]) )
        
        W_EE = tf.placeholder(tf.float32)
        W_EI = tf.placeholder(tf.float32)
        W_IE = tf.placeholder(tf.float32)
        W_II = tf.placeholder(tf.float32)
        k = tf.placeholder(tf.float32)
        n_E = tf.placeholder(tf.float32)
        n_I = tf.placeholder(tf.float32) 
        tau_E = tf.placeholder(tf.float32)
        tau_I = tf.placeholder(tf.float32)
        
        c0 = tf.constant(c_in)
        h0 = tf.constant(h_in)
                
        # Compile functions:
        I_E = c0*h0 + tf.transpose(tf.reshape(tf.reduce_sum(W_EE * r_e, [1,2]), [75,75])) \
            - tf.transpose(tf.reshape(tf.reduce_sum(W_EI * r_i, [1,2]), [75,75]))
        I_I = c0*h0 + tf.transpose(tf.reshape(tf.reduce_sum(W_IE * r_e, [1,2]), [75,75])) \
            - tf.transpose(tf.reshape(tf.reduce_sum(W_II * r_i, [1,2]), [75,75]))

        I_thresh_E = tf.maximum(0., I_E)
        I_thresh_I = tf.maximum(0., I_I)

        r_SS_E = k * tf.pow(I_thresh_E, n_E)
        r_SS_I = k * tf.pow(I_thresh_I, n_I)

        rE_out = r_e + dt*(-r_e+r_SS_E)/tau_E
        rI_out = r_i + dt*(-r_i+r_SS_I)/tau_I
        
        update_rE = tf.assign(r_e, rE_out)
        update_rI = tf.assign(r_i, rI_out)
        
        init = tf.initialize_all_variables()
        
        rE = 0
        rI = 0
        
        fd = {W_EE:self.W_EE.astype(np.float32), 
                  W_EI:self.W_EI.astype(np.float32), 
                  W_IE:self.W_IE.astype(np.float32), 
                  W_II:self.W_II.astype(np.float32),
                  k:self.k.astype(np.float32),
                  n_E:self.n_E.astype(np.float32),
                  n_I:self.n_I.astype(np.float32),
                  tau_E:self.tau_E.astype(np.float32),
                  tau_I:self.tau_I.astype(np.float32)}
        
        with tf.Session() as sess:
            sess.run(init, feed_dict=fd)
            for t in range(timesteps):
                # run the simulation
                sess.run([update_rE, update_rI], feed_dict=fd)
            # fetch the rates
            rE = sess.run([r_e], feed_dict=fd)
            rI = sess.run([r_i], feed_dict=fd)
            
        return rE, rI
Author: benselby, Project: v1_modelling, Lines: 60, Source: ssn_subpop_tf.py


Example 12: __init__

 def __init__(self,inputs,size,is_training,sess,parForTarget=None,bn_param=None):
     
     self.sess = sess        
     self.scale = tf.Variable(tf.random_uniform([size],0.9,1.1))
     self.beta = tf.Variable(tf.random_uniform([size],-0.03,0.03))
     self.pop_mean = tf.Variable(tf.random_uniform([size],-0.03,0.03),trainable=False)
     self.pop_var = tf.Variable(tf.random_uniform([size],0.9,1.1),trainable=False)        
     self.batch_mean, self.batch_var = tf.nn.moments(inputs,[0])        
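     # NOTE: `decay` and `TAU` used below are module-level constants in the original project.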
     self.train_mean = tf.assign(self.pop_mean,self.pop_mean * decay + self.batch_mean * (1 - decay))  
     self.train_var = tf.assign(self.pop_var,self.pop_var * decay + self.batch_var * (1 - decay))
             
     def training(): 
         return tf.nn.batch_normalization(inputs,
             self.batch_mean, self.batch_var, self.beta, self.scale, 0.0000001 )
 
     def testing(): 
         return tf.nn.batch_normalization(inputs,
         self.pop_mean, self.pop_var, self.beta, self.scale, 0.0000001)
     
     if parForTarget!=None:
         self.parForTarget = parForTarget
         self.updateScale = self.scale.assign(self.scale*(1-TAU)+self.parForTarget.scale*TAU)
         self.updateBeta = self.beta.assign(self.beta*(1-TAU)+self.parForTarget.beta*TAU)
         self.updateTarget = tf.group(self.updateScale, self.updateBeta)
         
     self.bnorm = tf.cond(is_training,training,testing) 
     
Author: stevenpjg, Project: ddpg-aigym, Lines: 26, Source: batch_norm.py


Example 13: getUpdatesForBnRollingAverage

 def getUpdatesForBnRollingAverage(self) :
     # This function or something similar should stay, even if I clean the BN rolling average.
     if self._appliedBnInLayer :
         return [ tf.assign( ref=self._sharedNewMu_B, value=self._newMu_B, validate_shape=True ),
                 tf.assign( ref=self._sharedNewVar_B, value=self._newVar_B, validate_shape=True ) ]
     else :
         return []
Author: Kamnitsask, Project: deepmedic, Lines: 7, Source: layers.py


Example 14: main

def main(args):
  # We init as h=x
  W = tf.Variable([1], dtype=tf.float32)
  b = tf.Variable([0], dtype=tf.float32)
  x = tf.placeholder(tf.float32)
  h = W * x + b
  
  init = tf.global_variables_initializer()
  sess = tf.Session()
  #sess.run(init)
  #print("hyposis init:", sess.run(h, {x:[1,2,3,4]}))

  y = tf.placeholder(tf.float32)
  squared_deltas = tf.square(h - y)
  cost = 0.5 * tf.reduce_mean(squared_deltas)
  #print("cost init:", sess.run(cost, {x:[1,2,3,4], y:[0,-1,-2,-3]}))

  fixW = tf.assign(W, [-1.])
  fixb = tf.assign(b, [1.])
  sess.run([fixW, fixb])
  #print("W, b, cost expected:", sess.run([fixW, fixb, cost], {x:[1,2,3,4], y:[0,-1,-2,-3]}))

  # linear regression 
  sess.run(init)  # re-initialize W and b, undoing the fixW/fixb assignments
  optimizer = tf.train.GradientDescentOptimizer(0.01)
  train = optimizer.minimize(cost)
  
  for i in range(10000):
    sess.run(train, {x:[1,2,3,4,-3,35], y:[0,-1,-2,-3,4,-34]})
  
  curr_W, curr_b, curr_loss = sess.run([W, b, cost], {x:[1,2,3,4,-3,35], y:[0,-1,-2,-3,4,-34]})
  
  print("W, b, cost learned: ", curr_W, curr_b, curr_loss)
Author: yuqingwang15, Project: pythonproblempractices, Lines: 33, Source: linearregression.py


Example 15: fit

 def fit(self, xs, ys):
     sess = tf.get_default_session()
     if self._normalize_inputs:
         # recompute normalizing constants for inputs
         sess.run([
             tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)),
             tf.assign(self._x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-8),
         ])
     if self._normalize_outputs:
         # recompute normalizing constants for outputs
         sess.run([
             tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)),
             tf.assign(self._y_std_var, np.std(ys, axis=0, keepdims=True) + 1e-8),
         ])
     if self._use_trust_region:
         old_means, old_log_stds = self._f_pdists(xs)
         inputs = [xs, ys, old_means, old_log_stds]
     else:
         inputs = [xs, ys]
     loss_before = self._optimizer.loss(inputs)
     if self._name:
         prefix = self._name + "_"
     else:
         prefix = ""
     logger.record_tabular(prefix + 'LossBefore', loss_before)
     self._optimizer.optimize(inputs)
     loss_after = self._optimizer.loss(inputs)
     logger.record_tabular(prefix + 'LossAfter', loss_after)
     if self._use_trust_region:
         logger.record_tabular(prefix + 'MeanKL', self._optimizer.constraint_val(inputs))
     logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
Author: flyers, Project: rllab, Lines: 31, Source: gaussian_mlp_regressor.py


Example 16: update_parameters

def update_parameters(mu, sigma, best_params):
  new_mu = tf.reduce_mean(best_params, 0)
  mu_ass = tf.assign(mu, new_mu)
  diff = tf.squared_difference(best_params, new_mu)
  std = tf.sqrt(tf.reduce_mean(diff, 0))
  sigma_ass = tf.assign(sigma, std)
  return mu_ass, sigma_ass
Author: sygi, Project: ml-learning-diary, Lines: 7, Source: cross+entropy+method.py
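A hedged sketch of where update_parameters sits in a cross-entropy-method loop; all names below are illustrative.

import tensorflow as tf   # assumes update_parameters from Example 16 is in scope

dim = 5
mu = tf.Variable(tf.zeros([dim]))
sigma = tf.Variable(tf.ones([dim]))
best_params = tf.placeholder(tf.float32, [None, dim])   # the elite samples

mu_ass, sigma_ass = update_parameters(mu, sigma, best_params)

# Each iteration: sample candidates from N(mu, sigma), score them, keep the
# top fraction as elites, then refit the sampling distribution:
# sess.run([mu_ass, sigma_ass], {best_params: elites})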


Example 17: __init__

  def __init__(self, gan=None, config=None, trainer=None, name="ProgressCompressTrainHook"):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    d_loss = []

    self.x = tf.Variable(tf.zeros_like(gan.inputs.x))
    self.g = tf.Variable(tf.zeros_like(gan.generator.sample))

    stacked = tf.concat([self.gan.inputs.x, self.gan.generator.sample], axis=0)
    self.assign_x = tf.assign(self.x, gan.inputs.x)
    self.assign_g = tf.assign(self.g, gan.generator.sample)
    self.re_init_d = [d.initializer for d in gan.discriminator.variables()]
    gan.hack = self.g

    self.assign_knowledge_base = []

    bs = gan.batch_size()
    real = gan.discriminator.named_layers['knowledge_base_target']#tf.reshape(gan.loss.sample[:2], [2,-1])
    _inputs = hc.Config({'x':real})
    inner_gan = KBGAN(config=self.config.knowledge_base, inputs=_inputs, x=real, latent=stacked)
    self.kb_loss = inner_gan.loss
    self.kb = inner_gan.generator
    self.trainer = inner_gan.trainer
    variables = inner_gan.variables()
    #variables += self.kb.variables()

    for c in gan.components:
        if hasattr(c, 'knowledge_base'):
            for name, net in c.knowledge_base:
                assign = self.kb.named_layers[name]
                if self.ops.shape(assign)[0] > self.ops.shape(net)[0]:
                    assign = tf.slice(assign,[0 for i in self.ops.shape(net)] , [self.ops.shape(net)[0]]+self.ops.shape(assign)[1:])
                self.assign_knowledge_base.append(tf.assign(net, assign))

    self.gan.add_metric('d_kb', self.kb_loss.sample[0])
    self.gan.add_metric('g_kb', self.kb_loss.sample[1])
Author: 255BITS, Project: hyperchamber-gan, Lines: 35, Source: progress_compress_kbgan_train_hook.py


Example 18: if_train

 def if_train():
     batch_mean, batch_var = tf.nn.moments(inputs, axes=[0, 1, 2]) # compute mean across these axes (all but channels)
     # Exponential Mov. Avg. Decay (compute moving average of population, update as batches are seen.)
     train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
     train_var  = tf.assign(pop_var,  pop_var  * decay + batch_var  * (1 - decay))
     with tf.control_dependencies([train_mean, train_var]): # makes sure the moving averages are updated during training (absent below:)
       return tf.identity(batch_mean), tf.identity(batch_var)
Author: alexwal, Project: Deep-Learning, Lines: 7, Source: small-cifar10-bn-low-level.py


Example 19: batchnorm

def batchnorm(x, gamma, beta, r_mean, r_var):
  mean, var = tf.nn.moments(x,[0])
  update_mean = tf.assign(r_mean,0.9 * r_mean + 0.1 * mean)
  update_var = tf.assign(r_var,0.9 * r_var + 0.1 * var)
  with tf.control_dependencies([update_mean,update_var]):
    return tf.nn.batch_normalization(x,tf.clip_by_value(r_mean,1e-10,100),tf.clip_by_value(r_var,1e-10,100),
                                     offset=beta,scale=gamma,variance_epsilon=1e-5)
Author: dunkyfool, Project: qai, Lines: 7, Source: layers.py


Example 20: bn_layer

def bn_layer(inputs,is_training,name='BatchNorm',moving_decay=0.9,eps=1e-5):
    shape = inputs.shape
    assert len(shape) in [2,4]

    param_shape = shape[-1]

    gamma = tf.Variable(tf.ones(param_shape), name='gamma')
    beta  = tf.Variable(tf.zeros(param_shape), name='beta')
    mean  = tf.Variable(tf.ones(param_shape), trainable=False, name='mean')
    var   = tf.Variable(tf.ones(param_shape), trainable=False, name='var')
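    # NOTE: `lambda1` below is an L2-regularization coefficient defined at
    # module level in the original project.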

    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(gamma)) 
    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(beta)) 
    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(mean)) 
    tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(var)) 



    if is_training == True:
        batch_mean, batch_var = tf.nn.moments(inputs,[0,1,2],name='moments')
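        # Adding mean*1e-10 and var*1e-10 below creates a data dependency on the
        # assign ops, forcing the running statistics to update during training.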
        mean = tf.assign(mean, batch_mean)
        var = tf.assign(var, batch_var)
        return tf.nn.batch_normalization(inputs,batch_mean+mean*1e-10,batch_var+var*1e-10,gamma,beta,eps)
    else:
        return tf.nn.batch_normalization(inputs,mean,var,gamma,beta,eps)
Author: chengyake, Project: karch, Lines: 25, Source: main.py



Note: The tensorflow.assign examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar code and documentation platforms. Copyright of each snippet remains with its original author; consult the corresponding project's license before redistributing or using the code. Do not republish without permission.

