
Python tensorflow.add_to_collection Function Code Examples


This article collects typical usage examples of the Python tensorflow.add_to_collection function. If you are wondering what add_to_collection does, how to call it, or what real-world usage looks like, the curated examples below should help.



Below are 20 code examples of the add_to_collection function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
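
Before diving into the examples, here is a minimal, self-contained sketch of the core mechanic (TF 1.x API; the key and tensor names are illustrative, not taken from the examples below): tf.add_to_collection registers a value under a string key in the default graph, and tf.get_collection returns everything stored under that key.

import tensorflow as tf  # TF 1.x; the collection API was removed in TF 2.x

a = tf.constant(1.0, name='a')
b = tf.constant(2.0, name='b')

# Register tensors under an arbitrary string key in the default graph.
tf.add_to_collection('my_values', a)
tf.add_to_collection('my_values', b)

# Retrieve everything stored under that key, in insertion order.
total = tf.add_n(tf.get_collection('my_values'))

with tf.Session() as sess:
    print(sess.run(total))  # prints 3.0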

Example 1: add_gradients_summary

def add_gradients_summary(grads, name_prefix="", name_suffix="",
                          collection_key=None):
    """ add_gradients_summary.

    Add histogram summary for given gradients.

    Arguments:
        grads: A list of `Tensor`. The gradients to summarize.
        name_prefix: `str`. A prefix to add to summary scope.
        name_suffix: `str`. A suffix to add to summary scope.
        collection_key: `str`. A collection to store the summaries.

    Returns:
        The list of created gradient summaries.

    """

    # Add histograms for gradients.
    summ = []
    for grad, var in grads:
        if grad is not None:
            summ_name = format_scope_name(var.op.name, name_prefix,
                                          "Gradients/" + name_suffix)
            summ_exists = summary_exists(summ_name)
            if summ_exists is not None:
                tf.add_to_collection(collection_key, summ_exists)
                summ.append(summ_exists)
            else:
                summ.append(get_summary("histogram", summ_name, grad,
                                        collection_key))
    return summ
Author: rickyall, Project: tflearn, Lines: 31, Source: summaries.py


Example 2: _variable_with_weight_decay

def _variable_with_weight_decay(name, shape, stddev, wd):
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Author: amoliu, Project: Renju-AI, Lines: 7, Source: AI_multi_GPU_rollout_v3.py
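
This example only registers the weight-decay term; Examples 7 and 8 below show the consuming side. As a quick hedged sketch, the pattern is to register the data loss under the same 'losses' key and then sum the whole collection:

# Hedged sketch of the consuming side; cross_entropy_mean is assumed defined elsewhere.
tf.add_to_collection('losses', cross_entropy_mean)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')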


Example 3: _variable_with_weight_decay

def _variable_with_weight_decay(shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution.
    A weight decay is added only if one is specified.

    Args:
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    
    initializer = tf.truncated_normal_initializer(stddev=stddev)
    var = tf.get_variable('weights', shape=shape,
                          initializer=initializer)
    # var = tf.get_variable(name="weights", shape=shape, 
    #                       initializer=tf.contrib.layers.xavier_initializer())

    if wd and (not tf.get_variable_scope().reuse):
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Author: ruyi345, Project: FCN-TensorFlow, Lines: 28, Source: inception_resnet_v2_fcn_8s.py


Example 4: batch_norm

def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs,],
                               initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs,],
                                initializer=tf.ones_initializer())
        # for inference
        moving_mean = create_var("moving_mean", [num_inputs,],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(x, axes=reduce_dims)
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=decay)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=decay)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
Author: kaka7, Project: DeepLearning_tutorials, Lines: 28, Source: ResNet50.py
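
The moving-average update ops registered under tf.GraphKeys.UPDATE_OPS above do not run on their own. The standard TF 1.x pattern (a sketch, not part of this example's source) is to make the training op depend on them:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # loss is assumed defined elsewhere; the optimizer choice is illustrative.
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)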


Example 5: activation

def activation(incoming, activation='linear', name='activation'):

    """ Activation.

    Apply given activation to incoming tensor.

    Arguments:
        incoming: A `Tensor`. The incoming tensor.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.

    """

    if isinstance(activation, str):
        x = activations.get(activation)(incoming)
    elif hasattr(activation, '__call__'):
        x = activation(incoming)
    else:
        raise ValueError('Unknown activation type.')

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, x)

    return x
Author: igormq, Project: tflearn, Lines: 25, Source: core.py


Example 6: func_wrapper

def func_wrapper(weights):
    if weights.dtype.base_dtype == tf.float16:
        tf.add_to_collection('REGULARIZATION_FUNCTIONS', (weights, regularizer))
        # disabling the inner regularizer
        return None
    else:
        return regularizer(weights)
Author: fotwo, Project: OpenSeq2Seq, Lines: 7, Source: mp_wrapper.py
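
Storing (weights, regularizer) pairs in a collection defers float16 regularization so it can be applied at full precision later. A hedged sketch of the consuming side (the collection key matches the example; the loop itself is an assumption, not OpenSeq2Seq's actual code):

reg_losses = []
for weights, reg_fn in tf.get_collection('REGULARIZATION_FUNCTIONS'):
    # Cast to float32 so the regularization term is computed at full precision.
    reg_losses.append(reg_fn(tf.cast(weights, tf.float32)))
if reg_losses:
    loss += tf.add_n(reg_losses)  # loss is assumed defined elsewhere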


Example 7: loss

def loss(H, logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
      H: hyperparameter dict; H['arch']['num_classes'] gives the number of classes.
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    with tf.name_scope('loss'):
        batch_size = tf.size(labels)
        labels = tf.expand_dims(labels, 1)
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, H['arch']['num_classes']]), 1.0, 0.0)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                                onehot_labels,
                                                                name='xentropy')
        cross_entropy_mean = tf.reduce_mean(
            cross_entropy, name='xentropy_mean')
        tf.add_to_collection('losses', cross_entropy_mean)

        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    return loss
Author: TensorVision, Project: MediSeg, Lines: 30, Source: VGG8.py


Example 8: loss

def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Reshape the labels into a dense Tensor of
  # shape [batch_size, NUM_CLASSES].
  sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
  indices = tf.reshape(tf.range(0, FLAGS.batch_size, 1), [FLAGS.batch_size, 1])
  concated = tf.concat(1, [indices, sparse_labels])
  dense_labels = tf.sparse_to_dense(concated,
                                    [FLAGS.batch_size, NUM_CLASSES],
                                    1.0, 0.0)

  # Calculate the average cross entropy loss across the batch.
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
      logits, dense_labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Author: bicimsiz, Project: tensorflow, Lines: 30, Source: cifar10.py


Example 9: _variable_with_weight_decay

def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Author: 2020zyc, Project: tensorflow, Lines: 25, Source: cifar10.py


Example 10: weight_variable

def weight_variable(shape, initializer=None, init_val=None, wd=None, name=None, trainable=True):
    """Initialize weights.

    Args:
        shape: shape of the weights, list of int
        wd: weight decay
    """
    log = logger.get()

    if initializer is None:
        # initializer = tf.truncated_normal(shape, stddev=0.01)
        initializer = tf.truncated_normal_initializer(stddev=0.01)
    if init_val is None:
        var = tf.Variable(initializer(shape), name=name, trainable=trainable)
    else:
        var = tf.Variable(init_val, name=name, trainable=trainable)

    # log.info(var.name)
    # if init_val is not None:
    #     if hasattr(init_val, 'shape'):
    #         log.info('Initialized with array shape {}'.format(init_val.shape))
    #     else:
    #         log.info('Initialized with {}'.format(init_val))

    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Author: ziyu-zhang, Project: ins-seg-public, Lines: 28, Source: nnlib.py


Example 11: _variable

def _variable(name, shape, initializer, wd=None):
  var = tf.get_variable(name, shape, initializer=initializer)

  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Author: adhsu, Project: deepaccent, Lines: 7, Source: utils.py


Example 12: _construct

    def _construct(self):
        """
        Construct the model; main part of it goes here
        """
        # our query = m_u + e_i
        query = (self._cur_user, self._cur_item)
        neg_query = (self._cur_user, self._cur_item_negative)

        # Positive
        neighbor = self._mem_layer(query,
                                   self.user_memory(self.input_neighborhoods),
                                   self.user_output(self.input_neighborhoods),
                                   self.input_neighborhood_lengths,
                                   self.config.max_neighbors)[-1].output
        self.score = self._output_module(tf.concat([self._cur_user * self._cur_item,
                                                    neighbor], axis=1))

        # Negative
        neighbor_negative = self._mem_layer(neg_query,
                                            self.user_memory(self.input_neighborhoods_negative),
                                            self.user_output(self.input_neighborhoods_negative),
                                            self.input_neighborhood_lengths_negative,
                                            self.config.max_neighbors)[-1].output
        negative_output = self._output_module(tf.concat(
            [self._cur_user * self._cur_item_negative, neighbor_negative], axis=1))

        # Loss and Optimizer
        self.loss = LossLayer()(self.score, negative_output)
        self._optimizer = OptimizerLayer(self.config.optimizer, clip=self.config.grad_clip,
                                         params=self.config.optimizer_params)
        self.train = self._optimizer(self.loss)

        tf.add_to_collection(GraphKeys.PREDICTION, self.score)
Author: dotrado, Project: CollaborativeMemoryNetwork, Lines: 33, Source: cmn.py
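
Registering the prediction tensor under a named collection lets serving code recover it after importing the graph from a checkpoint, without keeping a Python reference to the tensor. A minimal sketch (the checkpoint path is illustrative; GraphKeys.PREDICTION is the same custom key used above):

saver = tf.train.import_meta_graph('model.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, 'model.ckpt')
    score = tf.get_collection(GraphKeys.PREDICTION)[0]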


Example 13: weighted_loss

def weighted_loss(logits, labels, num_classes, head=None):
    """ median-frequency re-weighting """
    with tf.name_scope('loss'):

        logits = tf.reshape(logits, (-1, num_classes))

        epsilon = tf.constant(value=1e-10)

        logits = logits + epsilon

        # construct one-hot label array
        label_flat = tf.reshape(labels, (-1, 1))

        # should be [batch ,num_classes]
        labels = tf.reshape(tf.one_hot(label_flat, depth=num_classes), (-1, num_classes))

        softmax = tf.nn.softmax(logits)

        cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax + epsilon), head), axis=[1])

        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

        tf.add_to_collection('losses', cross_entropy_mean)

        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

    return loss
Author: Ray-Leung, Project: Tensorflow-SegNet, Lines: 27, Source: model.py


Example 14: add_trainable_vars_summary

def add_trainable_vars_summary(variables, name_prefix="", name_suffix="",
                               collection_key=None):
    """ add_trainable_vars_summary.

    Add histogram summary for given variables weights.

    Arguments:
        variables: A list of `Variable`. The variables to summarize.
        name_prefix: `str`. A prefix to add to summary scope.
        name_suffix: `str`. A suffix to add to summary scope.
        collection_key: `str`. A collection to store the summaries.

    Returns:
        The list of created weights summaries.

    """

    # Add histograms for trainable variables.
    summ = []
    for var in variables:
        summ_name = format_scope_name(var.op.name, name_prefix, name_suffix)
        summ_exists = summary_exists(summ_name)
        if summ_exists is not None:
            tf.add_to_collection(collection_key, summ_exists)
            summ.append(summ_exists)
        else:
            summ.append(get_summary("histogram", summ_name, var, collection_key))
    return summ
Author: rickyall, Project: tflearn, Lines: 28, Source: summaries.py


Example 15: inference

def inference(input_tensor,train,regularizer):
    # Layer 1: convolution
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight",
                [CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("biases",[CONV1_DEEP],
                 initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor,conv1_weights,
                             strides=[1,1,1,1],padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))
    # Layer 2: max pooling
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1,ksize=[1,2,2,1],
                               strides=[1,2,2,1],padding='SAME')
    # Layer 3: convolution
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable("weight",
                [CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("biases",[CONV2_DEEP],
                 initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1,conv2_weights,
                             strides=[1,1,1,1],padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))    
        
    # Layer 4: max pooling
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2,ksize=[1,2,2,1],
                               strides=[1,2,2,1],padding='SAME')
        
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    
    reshaped = tf.reshape(pool2,[pool_shape[0],nodes])
    
    # Layer 5: fully connected
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable("weight",[nodes,FC_SIZE],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights need regularization.
        if regularizer != None:
            tf.add_to_collection('losses',regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias",[FC_SIZE],
                initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_weights) + fc1_biases)
        if train: fc1 = tf.nn.dropout(fc1,0.5)

    # Layer 6: fully connected
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable("weight",[FC_SIZE,NUM_LABELS],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights need regularization.
        if regularizer != None:
            tf.add_to_collection('losses',regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias",[NUM_LABELS],
                initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1,fc2_weights) + fc2_biases

    return logit
Author: yyzahuopu, Project: Deep-learning, Lines: 60, Source: mnist_inferenceCNN.py


Example 16: cross_entropy_loss

def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0,
                       weight=1.0, scope=None):
    """Define a Cross Entropy loss using softmax_cross_entropy_with_logits.

    It can scale the loss by weight factor, and smooth the labels.

    Args:
      logits: [batch_size, num_classes] logits outputs of the network .
      one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.
      label_smoothing: if greater than 0 then smooth the labels.
      weight: scale the loss by this factor.
      scope: Optional scope for op_scope.

    Returns:
      A tensor with the softmax_cross_entropy loss.
    """
    logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())
    #with tf.op_scope([logits,one_hot_labels],scope,'CrossEntropyLoss') :
    with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]):
        num_classes = one_hot_labels.get_shape()[-1].value
        one_hot_labels = tf.cast(one_hot_labels,logits.dtype)
        if label_smoothing > 0:
            smooth_positives = 1.0 - label_smoothing
            smooth_negatives = label_smoothing / num_classes
            one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
            one_hot_labels,
            name = 'xentropy')
        weight = tf.convert_to_tensor(weight,
            dtype = logits.dtype.base_dtype,
            name = 'loss_weight')
        loss = tf.mul(weight,tf.reduce_mean(cross_entropy),name = 'value')
        tf.add_to_collection(LOSSES_COLLECTION,loss)
        return loss
Author: fenss, Project: tat_algorithm, Lines: 34, Source: losses.py


Example 17: loss

  def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables

    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
      ===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
      labels  : 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
      predict = predicts[i, :, :, :]
      label = labels[i, :, :]
      object_num = objects_num[i]
      nilboy = tf.ones([7,7,2])
      tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
      for j in range(4):
        loss[j] = loss[j] + tuple_results[2][j]
      nilboy = tuple_results[5]

    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)

    tf.summary.scalar('class_loss', loss[0]/self.batch_size)
    tf.summary.scalar('object_loss', loss[1]/self.batch_size)
    tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)
    tf.summary.scalar('coord_loss', loss[3]/self.batch_size)
    tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
Author: TrendonixNetwork, Project: ProjectCybonix, Lines: 33, Source: yolo_tiny_net.py


Example 18: get_output_for

    def get_output_for(self):
        """Perform the convolution operation, activation and return the output
        ``tf.Tensor``.

        Returns
        -------
        ``tf.Tensor``
            Output tensor of this layer.
        """
        states = []
        outputs = []
        lstm = rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=True)
        initial_state = state = lstm.zero_state(batch_size, tf.float32)
        with tf.name_scope(self.name) as scope:
            for _id in xrange(self.num_of_cells):
                if _id > 0:
                    scope.reuse_variables()
                output, state = lstm(self.input_layer, state)

                if self.activation is not None:
                    output = self.activation(output)

                outputs.append(output)
                states.append(state)

        final_state = state
        if self.return_cell_out:
            output = tf.reshape(tf.concat(1, outputs), [-1, size])
        else:
            output = outputs[-1]
        tf.add_to_collection(BerryKeys.LAYER_OUTPUTS, output)
        return output
Author: Arya-ai, Project: braid, Lines: 32, Source: rnn.py


Example 19: bacthnorm

def bacthnorm(inputs, scope, epsilon=1e-05, momentum=0.99, is_training=True):
    inputs_shape = inputs.get_shape().as_list()  # shape of the input tensor
    params_shape = inputs_shape[-1:]  # number of channels (last dimension)
    axis = list(range(len(inputs_shape) - 1))

    with tf.variable_scope(scope):
        beta = create_variable("beta", params_shape,
                               initializer=tf.zeros_initializer())
        gamma = create_variable("gamma", params_shape,
                                initializer=tf.ones_initializer())
        # moving mean: constant, not trainable, used for inference
        moving_mean = create_variable("moving_mean", params_shape,
                            initializer=tf.zeros_initializer(), trainable=False)
        # moving variance: constant, not trainable
        moving_variance = create_variable("moving_variance", params_shape,
                            initializer=tf.ones_initializer(), trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(inputs, axes=axis)  # compute batch mean and variance
        # Moving-average update, mixing in the previous value: x_t = a * x_{t-1} + (1 - a) * x_now
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=momentum)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=momentum)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_mean)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
Author: dyz-zju, Project: MVision, Lines: 28, Source: MobileNet_tf.py


Example 20: bn

def bn(x, c):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]

    if c["use_bias"]:
        bias = _get_variable("bias", params_shape, initializer=tf.zeros_initializer)
        return x + bias

    axis = list(range(len(x_shape) - 1))

    beta = _get_variable("beta", params_shape, initializer=tf.zeros_initializer)
    gamma = _get_variable("gamma", params_shape, initializer=tf.ones_initializer)

    moving_mean = _get_variable("moving_mean", params_shape, initializer=tf.zeros_initializer, trainable=False)
    moving_variance = _get_variable("moving_variance", params_shape, initializer=tf.ones_initializer, trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        c["is_training"], lambda: (mean, variance), lambda: (moving_mean, moving_variance)
    )

    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    # x.set_shape(inputs.get_shape()) ??

    return x
Author: yaowenwu, Project: tensorflow-resnet, Lines: 31, Source: resnet.py



Note: The tensorflow.add_to_collection examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.

