
Python tensorflow.histogram_summary Function Code Examples


This article collects typical usage examples of the Python function tensorflow.histogram_summary. If you are wondering how histogram_summary is used in practice, what it does, or what real code that calls it looks like, the curated examples below should help.



A total of 20 code examples of the histogram_summary function are shown below, sorted by popularity by default; a minimal usage sketch precedes them.
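
Before diving into the examples, here is a minimal, self-contained sketch of how tf.histogram_summary is typically wired into a summary pipeline. It uses the TensorFlow 0.x API that all of the snippets below were written against; the variable shape and log directory are illustrative assumptions, not taken from the examples. In TensorFlow 1.0 and later the function was renamed tf.summary.histogram (with tf.merge_all_summaries and tf.train.SummaryWriter becoming tf.summary.merge_all and tf.summary.FileWriter).

import math
import tensorflow as tf

# A variable whose value distribution we want to inspect in TensorBoard
# (the shape [784, 128] is an arbitrary example).
weights = tf.Variable(tf.truncated_normal([784, 128], stddev=1.0 / math.sqrt(784.0)),
                      name='weights')

# Attach a histogram summary op to the variable.
tf.histogram_summary('weights', weights)

merged = tf.merge_all_summaries()                        # gather every summary op in the graph
writer = tf.train.SummaryWriter('/tmp/histogram_demo')   # log directory is an assumption

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    summary = sess.run(merged)                           # evaluate the merged summaries
    writer.add_summary(summary, global_step=0)           # then: tensorboard --logdir /tmp/histogram_demo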

Example 1: produce_embeddings

def produce_embeddings(source):
    """ Produce the embbedings from the one-hot vectors 
    
    Args:
        source: 4D tensor, shape=(BATCH_SIZE, 1, S_ENGLISH, T_ENGLISH)
    
    Returns:
        4D tensor, shape=(BATCH_SIZE, 1, S_ENGLISH, EMBEDDINGS_DIMENSION)
    """
    
    with tf.variable_scope('Embeddings'):
        weights = tf.get_variable(name='weights', 
                                  shape=[1,1,T_ENGLISH,EMBEDDINGS_DIMENSION], 
                                  initializer=tf.random_normal_initializer(stddev=1.0/math.sqrt(float(T_ENGLISH)))
                                  )
        
        weights_hist = tf.histogram_summary("weights-encode", weights)
        
        biases = tf.get_variable(name='biases',
                                 shape=[EMBEDDINGS_DIMENSION],          
                                 initializer=tf.constant_initializer(0.0))
                                 
        biases_hist = tf.histogram_summary("biases-encode", biases)
        
        embeddings = tf.nn.tanh(biases + tf.nn.conv2d(source, filter=weights, strides=[1,1,1,1], padding='VALID'))
        
        return embeddings                
Developer: alexisrosuel | Project: NMT | Lines: 27 | Source: NMT.py


Example 2: train

def train(total_loss, global_step):
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)
    tf.scalar_summary("learning_rate", lr)

    loss_averages_op = _add_loss_summaries(total_loss)

    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    for grad, var in grads:
        if grad is not None:
            tf.histogram_summary(var.op.name + "/gradients", grad)

    #variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    #variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op]):
        train_op = tf.no_op(name="train")

    return train_op
Developer: kannagiblog | Project: cnn_predict_minecraft_biome | Lines: 29 | Source: tf_model.py


Example 3: nn_conv_layer

    def nn_conv_layer(input_tensor, patch_size, num_channels, output_depth, layer_name, biases=False, act=None, pool=None):
        """Reusable code for making a simple neural net layer."""
        # Adding a name scope ensures logical grouping of the layers in the graph.
        with tf.name_scope(layer_name):
            # This Variable will hold the state of the weights for the layer
            with tf.name_scope('weights'):
                weights = weight_variable([patch_size,patch_size,num_channels,output_depth])
                # print ("weights:%s"%(weights.get_shape()))
                variable_summaries(weights, layer_name + '/weights')
            if (biases==True):
                with tf.name_scope('biases'):
                    biases = bias_variable([output_depth])
                    # print("biases:%s" % (biases.get_shape()))
                    variable_summaries(biases, layer_name + '/biases')
            with tf.name_scope('conv2d'):
                # print("input:%s" % (input_tensor.get_shape()))
                preactivate = tf.nn.conv2d(input_tensor, weights, [1, 1, 1, 1], padding='SAME')
                tf.histogram_summary(layer_name + '/pre_activations', preactivate)
                print("preactivate:%s" % (preactivate.get_shape()))
            if (pool!=None):
                max_pool=pool(preactivate,ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                         padding='SAME',name='max_pool')
            if (act!=None):
                activations = act(max_pool+biases, 'activation')
                # tf.histogram_summary(layer_name + '/activations', activations)

            return preactivate
Developer: KannShi | Project: Udacity_DL | Lines: 29 | Source: CNN.py


Example 4: inference

def inference(images):
    """
    Build the MNIST model
    """

    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, LAYER_SIZE],
                                stddev= 1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
        biases = tf.Variable(tf.zeros([LAYER_SIZE]),
                             name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
        # Add summary ops to collect data
        tf.histogram_summary('weights', weights)
        tf.histogram_summary('biases', biases)

    # Output Layer - is this correct? does this layer have any weights?
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([LAYER_SIZE, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(LAYER_SIZE))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = logSoftMax(tf.matmul(hidden1, weights) + biases)
        return logits
Developer: mdrumond | Project: tensorflow | Lines: 28 | Source: simpleDnn_model.py


Example 5: pool_layer

    def pool_layer(self, input_, ksize, stride, name):
        with tf.variable_scope(name):
            pooled = self.max_pool(input_, ksize, stride, name="name")

            tf.histogram_summary(name + "/pooled", pooled)

            return pooled
Developer: amharc | Project: jnp3 | Lines: 7 | Source: Model.py


Example 6: train

def train(loss, learning_rate):
	""" Sets up an ADAM optimizer, computes gradients and updates variables.
	
	Args:
		loss: A float. The loss function to minimize.
		learning_rate: A float. The learning rate for ADAM.
	
	Returns:
		train_op: The operation to run for training.
		global_step: The current number of training steps made by the optimizer.
	"""
	# Set optimization parameters
	global_step = tf.Variable(0, name='global_step', trainable=False)
	optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, 
									   beta2=0.995, epsilon=1e-06)
	
	# Compute and apply gradients		   
	gradients = optimizer.compute_gradients(loss)
	train_op = optimizer.apply_gradients(gradients, global_step=global_step)
	
	# Summarize gradients
	for gradient, variable in gradients:
		if gradient is not None:
			tf.histogram_summary(variable.op.name + '/gradients', gradient)

	return train_op, global_step
Developer: brokendata | Project: cnn4brca | Lines: 26 | Source: model.py


Example 7: inference

    def inference(self, images, z):
        print "="*100
        print "images DCGAN inference:"
        print images.get_shape()
        print "="*100

        self.z_sum = tf.histogram_summary("z", z)

        # Generative
        print "generative"
        self.generator = Generative()
        self.G = self.generator.inference(z)

        # Discriminative
        print "discriminative from images"
        self.discriminator = Discriminative()
        self.D, self.D_logits = self.discriminator.inference(images)

        print "discriminative for sample from noize"
        self.sampler = self.generator.sampler(z)
        self.D_, self.D_logits_ = self.discriminator.inference(self.G, reuse=True)

        self.d_sum = tf.histogram_summary("d", self.D)
        self.d__sum = tf.histogram_summary("d_", self.D_)
        self.G_sum = tf.image_summary("G", self.G)

        return images, self.D_logits, self.D_logits_, self.G_sum, self.z_sum, self.d_sum, self.d__sum
Developer: MasazI | Project: dcgan | Lines: 27 | Source: model.py


Example 8: _deconv

def _deconv(inpOp, kH, kW, nOut, dH=1, dW=1, relu=True, name=None):
    global deconv_counter
    global parameters
    if not name:
      name = 'deconv' + str(deconv_counter)
    deconv_counter += 1
    with tf.variable_scope(name) as scope:
        nIn = int(inpOp.get_shape()[-1])
        in_shape = inpOp.get_shape()
        stddev = 1e-3
        kernel = tf.get_variable('weights',[kH, kW, nOut, nIn], initializer=tf.random_normal_initializer(stddev=(kH*kW*nIn)**0.5*stddev))
        
        conv = tf.nn.deconv2d(inpOp, kernel, [int(in_shape[0]),int(in_shape[1]),int(in_shape[2]),nOut], [1, 1, 1, 1],
                         padding="SAME")
                         
        biases = tf.get_variable('biases', [nOut], initializer=tf.constant_initializer(value=0.0))
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if relu:
          bias = tf.nn.relu(bias, name='relu')
        #parameters += [kernel, biases]
        #bias = tf.Print(bias, [tf.sqrt(tf.reduce_mean(tf.square(inpOp - tf.reduce_mean(inpOp))))], message=kernel.name)
        tf.histogram_summary(bias.name+"/output", bias)
        tf.image_summary(bias.name+"/output", bias[:,:,:,0:3])
        #tf.image_summary(scope+"/depth_weight", depthwise_filter)
        # tf.image_summary(scope+"/point_weight", pointwise_filter)
        
        return bias
Developer: Hello1024 | Project: tf-gen | Lines: 27 | Source: utils.py


Example 9: run_training

def run_training(cost_threshold=FLAGS.cost_threshold, max_steps=FLAGS.max_steps):
  global setup_done
  cost_value = 1e9
  accuracy_value = 0.0
  # if setup_done is False:
  setup_done = True
  opt = tf.train.AdamOptimizer()
  # try:
  #opt = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
  i_trains = [s.idx for s in trains]
  i_valids = [s.idx for s in valids]
  i_tests = [s.idx for s in tests]
  i_all = [s.idx for s in sentences]
  logits = batch_logits(i_ss, activations.ref())
  labs = batch_labels(i_ss)
  loss = calc_loss(logits, labs)
  i_ss_accuracy = accuracy(logits, labs)
  #v_labs = batch_labels(valid_ss)
  #v_logits = batch_logits(valid_ss, activations.ref())
  #v_loss = calc_loss(v_logits, v_labs)
  #train_accuracy = accuracy(logits, labs)
  #valid_accuracy = accuracy(v_logits, v_labs)
  # test_accuracy = accuracy(i_tests, activations.ref())
  train_op = opt.minimize(loss)
  #tf.histogram_summary('activations', activations)
  tf.histogram_summary('samples', i_ss)
  tf.scalar_summary('loss', loss)
  #tf.scalar_summary('training accuracy', train_accuracy)
  tf.scalar_summary('validation accuracy', i_ss_accuracy)
  # tf.scalar_summary('test accuracy', test_accuracy)
  merged = tf.merge_all_summaries()
  sess.run(tf.initialize_all_variables())
  writer = tf.train.SummaryWriter(
      '/Users/rgobbel/src/pymisc/rntn_tf/tf_logs', sess.graph)
  # except Exception as exc:
  #     print('Exception: {0}'.format(exc))
  # setup_done = False
  f_dict[i_ss] = random.sample(i_trains, FLAGS.batch_size)
  _, cost_value = sess.run([train_op, loss], feed_dict=f_dict)
  #f_dict[valid_ss] = i_valids
  _ = sess.run(zero_activations(activations.ref()), feed_dict=f_dict)
  print('starting')
  accuracy_value = sess.run([i_ss_accuracy], feed_dict=f_dict)
  for step in range(max_steps):
    #_ = sess.run(zero_activations(activations.ref()), feed_dict=f_dict)
    f_dict[i_ss] = random.sample(i_trains, FLAGS.batch_size)
    #logits = batch_logits(i_ss, activations.ref())
    #labs = batch_labels(i_ss)
    _, _, cost_value = sess.run([tf.pack([i_ss]), train_op, loss], feed_dict=f_dict)
    #_ = sess.run(zero_activations(activations.ref()), feed_dict=f_dict)
    f_dict[i_ss] = i_valids
    _, valid_accuracy_value = sess.run([loss, i_ss_accuracy], feed_dict=f_dict)
    (summ,) = sess.run([merged], feed_dict=f_dict)
    # summ = sess.run([merged], feed_dict=f_dict)
    writer.add_summary(summ, step)
    writer.flush()
    print('.', end='', flush=True)
    if cost_value < cost_threshold:
      return step, cost_value, valid_accuracy_value
  return max_steps, cost_value, valid_accuracy_value
Developer: rgobbel | Project: rntn | Lines: 60 | Source: tf_rntn.py


Example 10: dense

 def dense(self, width=100, act=tf.nn.relu):
     """
     Fully connected layer.
     It does a matrix multiply, bias add, and then uses relu to nonlinearize.
     """
     input_tensor = self.layers[-1]["activations"]
     layer_name = "dense" + str(len([l for l in self.layers
         if l["type"]=="dense"]))
     input_dim = functools.reduce(operator.mul, input_tensor.get_shape()[1:].as_list(), 1)
     input_tensor = tf.reshape(input_tensor, (-1, input_dim))
     # Adding a name scope ensures logical grouping of the layers in the graph.
     with tf.name_scope(layer_name):
         # This Variable will hold the state of the weights for the layer
         with tf.name_scope('weights'):
             weights = weight_variable([input_dim, width])
             variable_summaries(weights, layer_name + '/weights')
         with tf.name_scope('biases'):
             biases = bias_variable([width])
             variable_summaries(biases, layer_name + '/biases')
         with tf.name_scope('Wx_plus_b'):
             preactivate = tf.matmul(input_tensor, weights) + biases
             activations = act(preactivate, 'activation')
             tf.histogram_summary(layer_name + '/activations', activations)
     self.layers.append( {
         "activations": activations,
         "weights": weights,
         "biases": biases,
         "type": "dense"
         } )
     return self
Developer: butternutdog | Project: ogres | Lines: 30 | Source: basics.py


Example 11: _conv

def _conv(inpOp, kH, kW, nOut, dH=1, dW=1, relu=True):
    global conv_counter
    global parameters
    name = 'conv' + str(conv_counter)
    conv_counter += 1
    with tf.name_scope(name) as scope:
        nIn = int(inpOp.get_shape()[-1])
        stddev = 5e-3
        kernel = tf.Variable(tf.truncated_normal([kH, kW, nIn, nOut],
                                                 dtype=tf.float32,
                                                 stddev=(kH*kW*nIn)**0.5*stddev), name='weights')
        
        conv = tf.nn.conv2d(inpOp, kernel, [1, 1, 1, 1],
                         padding="SAME")

        biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if relu:
          bias = tf.nn.relu(bias, name=scope)
        #parameters += [kernel, biases]
        #bias = tf.Print(bias, [tf.sqrt(tf.reduce_mean(tf.square(inpOp - tf.reduce_mean(inpOp))))], message=kernel.name)
        tf.histogram_summary(scope+"/output", bias)
        tf.image_summary(scope+"/output", bias[:,:,:,0:3])
        tf.image_summary(scope+"/kernel_weight", tf.expand_dims(kernel[:,:,0:3,0], 0))
        # tf.image_summary(scope+"/point_weight", pointwise_filter)
        
        return bias
Developer: Hello1024 | Project: tf-gen | Lines: 28 | Source: utils.py


Example 12: bn

    def bn(self, act=tf.nn.relu):
        """
        Batch normalization.
        See: http://arxiv.org/pdf/1502.03167v3.pdf
        Based on implementation found at: 
        http://www.r2rt.com/posts/implementations/2016-03-29-implementing-batch-normalization-tensorflow/
        """
        # Adding a name scope ensures logical grouping of the layers in the graph.

        layer_name = "bn" + str(len([l for l in self.layers
            if l["type"]=="bn"]))

        input_tensor = self.layers[-1]["activations"]
        
        with tf.name_scope(layer_name):
            
            dim = input_tensor.get_shape()[1:] # 64, 1, 10, 100
            
            beta = tf.Variable(tf.zeros(dim))
            scale = tf.Variable(tf.ones(dim))
            variable_summaries(beta, layer_name + "/beta")
            variable_summaries(scale, layer_name + "/scale")
            z = input_tensor
            batch_mean, batch_var = tf.nn.moments(input_tensor,[0])
            epsilon = 1e-3
            z_hat = (z - batch_mean) / tf.sqrt(batch_var + epsilon)
            bn_z = scale * z_hat + beta
            activations = act(bn_z, 'activation')
            tf.histogram_summary(layer_name + '/activations', activations)
              
        self.layers.append({
            "activations": activations,
            "type": "bn"})
        return self
Developer: butternutdog | Project: ogres | Lines: 34 | Source: basics.py


Example 13: train

def train(lr, total_loss, global_step):
    # Variables that affect learning rate.

    # Compute gradients.
    #with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

    # Add histograms for gradients.
    for i, (grad, var) in enumerate(grads):
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)
            grads[i] = (tf.clip_by_norm(grad, 5), var)

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
Developer: danfeiX | Project: drl | Lines: 29 | Source: dqn.py


Example 14: conv_nn_layer

def conv_nn_layer(input_tensor, window_width, window_height, input_dim, 
                  output_dim, layer_name, act=tf.nn.relu):
    """
    Defines a convolutional neural network layer
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
        # Define layer weights
        with tf.name_scope('weights'):
            weights = weight_variable([window_width, window_height, 
                                       input_dim, output_dim])
            variable_summaries(weights, layer_name + '/weights')
            
        # Define biases
        with tf.name_scope('biases'):
            biases = bias_variable([output_dim])
            variable_summaries(biases, layer_name + '/biases')
            
        # Convolve weights on image
        with tf.name_scope('preactivation'):
            preactivate = conv2d(input_tensor, weights) + biases
            tf.histogram_summary(layer_name + '/pre_activations', preactivate)
            
        # Determine layer activation
        activations = act(preactivate, 'activation')
        tf.histogram_summary(layer_name + '/activations', activations)
        return activations
Developer: rhiga2 | Project: AppliedML | Lines: 27 | Source: mnist_functions.py


Example 15: expectation_maximization_step

    def expectation_maximization_step(self, x):
        
        # probability of emission sequence
        obs_prob_seq = tf.gather(self.E, x)

        with tf.name_scope('Forward_Backward'):
            self.forward_backward(obs_prob_seq)

        with tf.name_scope('Re_estimate_transition'):
            new_T0, new_transition = self.re_estimate_transition(x)
        
        with tf.name_scope('Re_estimate_emission'):
            new_emission = self.re_estimate_emission(x)

        with tf.name_scope('Check_Convergence'):
            converged = self.check_convergence(new_T0, new_transition, new_emission)

        with tf.name_scope('Update_parameters'):
            self.T0 = tf.assign(self.T0, new_T0)
            self.E = tf.assign(self.E, new_emission)
            self.T = tf.assign(self.T, new_transition)
            #self.count = tf.assign_add(self.count, 1)
             
            with tf.name_scope('histogram_summary'):
                _ = tf.histogram_summary(self.T0.name, self.T0)
                _ = tf.histogram_summary(self.T.name, self.T)
                _ = tf.histogram_summary(self.E.name, self.E)
        return converged
Developer: aliziaei | Project: HiddenMarkovModel_TensorFlow | Lines: 28 | Source: HiddenMarkovModel.py


Example 16: conv_layer

def conv_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu, fully_connected=False):
    """
    Makes a simple convolutional layer based on input and output dimensions.

    input_tensor: A tensor of the input data from the previous layer (of shape [a, b, c, d])

    Returns the pooled tensor after CONV -> ACT -> POOL
    """
    with tf.name_scope(layer_name):
        with tf.name_scope("weights"):
            weights = weight_variable([input_dim, output_dim]) if fully_connected else weight_variable([5, 5, input_dim, output_dim])
            variable_summaries(weights, layer_name + '/weights')
        with tf.name_scope("biases"):
            bias = bias_variable([output_dim])
            variable_summaries(bias, layer_name + '/bias')
        if fully_connected:
            with tf.name_scope("fully_connected"):
                final = act(tf.matmul(input_tensor, weights) + bias)
                tf.histogram_summary(layer_name + '/fully_connected', final)
                return final
        else:
            with tf.name_scope("convolution"):
                convolution = act(conv2d(input_tensor, weights) + bias)
                tf.histogram_summary(layer_name + '/convolution', convolution)
                pooled = max_pool_2x2(convolution)
                return pooled
Developer: mutual-ai | Project: deep-learning | Lines: 26 | Source: deep_mnist_k.py


Example 17: conv_layer

	def conv_layer(input, filter_shape, strides=[1, 1, 1, 1], keep_prob=1):
		""" Adds a convolutional layer to the graph. 
	
		Creates filters and biases, computes the convolutions, passes the output
		through a leaky ReLU activation function and applies dropout. Equivalent
		to calling conv_op()->leaky_relu()->dropout().

		Args:
			input: A tensor of floats with shape [batch_size, input_height,
				input_width, input_depth]. The input volume.
			filter_shape: A list of 4 integers with shape [filter_height, 
			filter_width, input_depth, output_depth]. This determines the size
			and number of filters of the convolution.
			strides: A list of 4 integers. The amount of stride in the four
				dimensions of the input.
			keep_prob: A float. Probability of dropout in the layer.
			
		Returns:
			A tensor of floats with shape [batch_size, output_height,
			output_width, output_depth]. The product of the convolutional layer.
		"""
		# conv -> relu -> dropout
		conv = conv_op(input, filter_shape, strides) 
		relu = leaky_relu(conv)
		output = dropout(relu, keep_prob)
		
		# Summarize activations
		scope = tf.get_default_graph()._name_stack # No easier way
		tf.histogram_summary(scope + '/activations', output)
		
		return output
Developer: brokendata | Project: cnn4brca | Lines: 31 | Source: model.py


Example 18: _process

 def _process(self, grads):
     for grad, var in grads:
         tf.histogram_summary(var.op.name + '/grad', grad)
         tf.add_to_collection(MOVING_SUMMARY_VARS_KEY,
                              tf.sqrt(tf.reduce_mean(tf.square(grad)),
                                      name=var.op.name + '/gradRMS'))
     return grads
Developer: Jothecat | Project: tensorpack | Lines: 7 | Source: gradproc.py


Example 19: train

def train(total_loss, global_step, learning_rate=INITIAL_LEARNING_RATE):
  lr = tf.train.exponential_decay(learning_rate,
                                  global_step,
                                  DECAY_STEPS,#number of steps required for it to decay
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)

  tf.scalar_summary('learning_rate', lr)

  #compute gradient step
  with tf.control_dependencies([total_loss]):
    opt = tf.train.MomentumOptimizer(lr, momentum=0.95)
    grads = opt.compute_gradients(total_loss)

  #if we wanted to clip the gradients
  #would apply the operation here

  #apply the gradients
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  for grad, var in grads:
    if grad is not None:
      print("Found gradients for: ", var.op.name)
      tf.histogram_summary(var.op.name + "/gradients", grad)

  with tf.control_dependencies([apply_gradient_op]):
    train_op = tf.no_op(name="train")

  #opt = tf.train.GradientDescentOptimizer(lr).minimize(total_loss, global_step=global_step)
  # grads = opt.compute_gradients(total_loss)

  return train_op
Developer: kingtaurus | Project: cs231n | Lines: 32 | Source: cifar10_tensorflow.py


Example 20: deconv_layer

def deconv_layer(input_tensor, mode_tensor, weight_init, filter_size,
                 filter_stride, num_filters, in_channels, output_size,
                 nonlinear_func, use_batchnorm, name):
    # Initialize variables
    weight_shape = [filter_size, filter_size, num_filters, in_channels]
    initializer = tf.random_normal_initializer(stddev=weight_init)
    deconv_weights = tf.get_variable(name + '/weights',
                                     shape=weight_shape,
                                     initializer=initializer)
    bias = tf.get_variable(name + '/bias',
                           shape=[num_filters],
                           initializer=tf.constant_initializer())

    # Apply deconvolution
    output_shape = [FLAGS.batch_size, output_size, output_size, num_filters]
    stride = [1, filter_stride, filter_stride, 1]
    deconv = tf.nn.conv2d_transpose(input_tensor, deconv_weights, output_shape,
                                    stride, padding='SAME',
                                    name=name + '/deconv')
    deconv = tf.nn.bias_add(deconv, bias, name=name + '/deconv_bias')
    # Apply batchnorm
    if use_batchnorm:
        deconv = batch_norm(deconv, num_filters,
                            tf.equal(mode_tensor, 'train'),
                            name + '/bn')

    activation = nonlinear_func(tf.nn.bias_add(deconv, bias),
                                name=name + '/activation')

    if not tf.get_variable_scope().reuse:
        tf.histogram_summary('summary/weights/' + name, deconv_weights)
        tf.histogram_summary('summary/activations/' + name, activation)
    return activation
Developer: kkihara | Project: GAN | Lines: 33 | Source: model.py



Note: The tensorflow.histogram_summary examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce without permission.

