Python tensorflow.scalar_summary Function Code Examples


This article collects typical usage examples of the Python function tensorflow.scalar_summary. If you have been asking yourself what scalar_summary does, how to call it, or what real-world usage looks like, the curated examples below should help.



The 20 scalar_summary code examples below are sorted by popularity by default. You can upvote the ones you like or find useful; your ratings help the site recommend better Python code examples.
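Before the examples, here is a minimal, self-contained sketch of the workflow they all share: tag a scalar tensor with tf.scalar_summary, merge the registered summaries, evaluate the merged op in a session, and write the result to a log directory for TensorBoard. Note this is the pre-1.0 API; in TensorFlow 1.0+ the function was renamed tf.summary.scalar. The loss variable and the /tmp/scalar_summary_demo log directory are illustrative placeholders, not taken from any of the examples.

import tensorflow as tf

# A scalar tensor to track; in real code this would be a loss or a metric.
loss = tf.Variable(1.0, name="loss")
tf.scalar_summary("loss", loss)

# Merge every summary registered so far into a single op.
merged = tf.merge_all_summaries()

# An op that changes the tracked value, so the plot is not flat.
dec_op = loss.assign_add(-0.1)

with tf.Session() as sess:
    # The writer serializes summaries (and the graph) into the log directory.
    writer = tf.train.SummaryWriter("/tmp/scalar_summary_demo", sess.graph)
    sess.run(tf.initialize_all_variables())
    for step in range(10):
        summary_str, _ = sess.run([merged, dec_op])
        writer.add_summary(summary_str, step)
    writer.flush()
    # Inspect with: tensorboard --logdir /tmp/scalar_summary_demo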

Example 1: _testGraphExtensionRestore

  def _testGraphExtensionRestore(self):
    test_dir = os.path.join(self.get_temp_dir(), "graph_extension")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
      # Restores from MetaGraphDef.
      new_saver = tf.train.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_saver.export_meta_graph()
      # Restores from checkpoint.
      new_saver.restore(sess, saver0_ckpt)
      # Adds loss and train ops.
      labels = tf.constant(0, tf.int32, shape=[100], name="labels")
      batch_size = tf.size(labels)
      labels = tf.expand_dims(labels, 1)
      indices = tf.expand_dims(tf.range(0, batch_size), 1)
      concated = tf.concat(1, [indices, labels])
      onehot_labels = tf.sparse_to_dense(
          concated, tf.pack([batch_size, 10]), 1.0, 0.0)
      logits = tf.get_collection("logits")[0]
      cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                              onehot_labels,
                                                              name="xentropy")
      loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")

      tf.scalar_summary(loss.op.name, loss)
      # Creates the gradient descent optimizer with the given learning rate.
      optimizer = tf.train.GradientDescentOptimizer(0.01)

      # Runs train_op.
      train_op = optimizer.minimize(loss)
      sess.run(train_op)
Author: 2er0 | Project: tensorflow | Lines of code: 32 | Source file: saver_test.py


Example 2: __init__

    def __init__(self, encoders, vocabulary, data_id,
                 layers=[], activation=tf.tanh, dropout_keep_p=0.5, name='seq_classifier'):
        self.encoders = encoders
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.layers = layers
        self.activation = activation
        self.dropout_keep_p = dropout_keep_p
        self.name = name
        self.max_output_len = 1

        with tf.variable_scope(name):
            self.learning_step = tf.Variable(0, name="learning_step", trainable=False)
            self.dropout_placeholder = tf.placeholder(tf.float32, name="dropout_plc")
            self.gt_inputs = [tf.placeholder(tf.int32, shape=[None], name="targets")]
            mlp_input = tf.concat(1, [enc.encoded for enc in encoders])
            mlp = MultilayerPerceptron(mlp_input, layers, self.dropout_placeholder, len(vocabulary))

            self.loss_with_gt_ins = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(mlp.logits, self.gt_inputs[0]))
            self.loss_with_decoded_ins = self.loss_with_gt_ins
            self.cost = self.loss_with_gt_ins

            self.decoded_seq = [mlp.classification]
            self.decoded_logits = [mlp.logits]

            tf.scalar_summary('val_optimization_cost', self.cost, collections=["summary_val"])
            tf.scalar_summary('train_optimization_cost', self.cost, collections=["summary_train"])
Author: archerbroler | Project: neuralmonkey | Lines of code: 28 | Source file: sequence_classifier.py


Example 3: _activation_summary

def _activation_summary(x):
    '''
    Create summaries for visualization.
    '''
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Author: pmnyc | Project: Machine_Learning_Test_Repository | Lines of code: 7 | Source file: model_mlp.py


Example 4: _add_loss_summaries

def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.scalar_summary(l.op.name +' (raw)', l)
    tf.scalar_summary(l.op.name, loss_averages.average(l))

  return loss_averages_op
Author: maximelouis | Project: whatfood | Lines of code: 25 | Source file: cifarfood.py


Example 5: get_config

def get_config():
    basename = os.path.basename(__file__)
    logger.set_logger_dir(
        os.path.join('train_log', basename[:basename.rfind('.')]))

    dataset_train = FakeData([(227,227,3), tuple()], 10)
    dataset_train = BatchData(dataset_train, 10)
    step_per_epoch = 1

    sess_config = get_default_sess_config()
    sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5

    lr = tf.train.exponential_decay(
        learning_rate=1e-8,
        global_step=get_global_step_var(),
        decay_steps=dataset_train.size() * 50,
        decay_rate=0.1, staircase=True, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    param_dict = np.load('alexnet.npy').item()

    return TrainConfig(
        dataset=dataset_train,
        optimizer=tf.train.AdamOptimizer(lr),
        callbacks=Callbacks([
            StatPrinter(),
            ModelSaver(),
            #ValidationError(dataset_test, prefix='test'),
        ]),
        session_config=sess_config,
        model=Model(),
        step_per_epoch=step_per_epoch,
        session_init=ParamRestore(param_dict),
        max_epoch=100,
    )
Author: gongenhao | Project: tensorpack | Lines of code: 35 | Source file: load_alexnet.py


Example 6: train

def train(total_loss, global_step):
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)
    tf.scalar_summary("learning_rate", lr)

    loss_averages_op = _add_loss_summaries(total_loss)

    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    for grad, var in grads:
        if grad is not None:
            tf.histogram_summary(var.op.name + "/gradients", grad)

    #variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    #variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op]):
        train_op = tf.no_op(name="train")

    return train_op
Author: kannagiblog | Project: cnn_predict_minecraft_biome | Lines of code: 29 | Source file: tf_model.py


Example 7: evaluate

def evaluate(accuracy_accumulator, val_loss_accumulator, validation_batches):
    accuracy = accuracy_accumulator/validation_batches
    loss = val_loss_accumulator/validation_batches
    accuracy_summary_op = tf.scalar_summary("accuracy", accuracy)
    val_loss_summary_op = tf.scalar_summary("val_cost", loss)

    return accuracy, accuracy_summary_op, val_loss_summary_op
Author: darksigma | Project: Fundamentals-of-Deep-Learning-Book | Lines of code: 7 | Source file: imdb_ohlstm.py


Example 8: drawGraph

    def drawGraph(self, n_row, n_latent, n_col):
        with tf.name_scope('matDecomp'):
            self._p = tf.placeholder(tf.float32, shape=[None, n_col])
            self._c = tf.placeholder(tf.float32, shape=[None, n_col])
            self._lambda = tf.placeholder(tf.float32)
            self._index = tf.placeholder(tf.float32, shape=[None, n_row])
            self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
            self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
            self._h = tf.matmul(tf.matmul(self._index, self._A), self._B) 
            
            weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
            self._weighted_loss = weighted_loss
            l2_A = tf.reduce_sum(tf.square(self._A))
            l2_B = tf.reduce_sum(tf.square(self._B))
            n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
            l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
            reg_term = tf.mul(self._lambda, l2)
            self._loss = tf.add(weighted_loss, reg_term)
            
            self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
            one = tf.constant(1, tf.float32)
            pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
            cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
            self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

            self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
            tf.scalar_summary('training_weighted_loss_l2', self._loss)
            tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
            merged = tf.merge_all_summaries()
Author: cning | Project: ehc | Lines of code: 29 | Source file: model.py


Example 9: train

    def train(self, eval_on_test=False):
        """ Train model and save it to file.

        Train model with given hidden layers. Training data is created
        by prepare_training_data(), which must be called before this function.
        """
        tf.reset_default_graph()
        with tf.Session() as sess:
            feature_data = tf.placeholder("float", [None, self.num_predictors])
            labels = tf.placeholder("float", [None, self.num_classes])

            layers = [self.num_predictors] + self.hidden_layers + [self.num_classes]
            model = self.inference(feature_data, layers)
            cost, cost_summary_op = self.loss(model, labels)
            training_op = self.training(cost, learning_rate=0.0001)

            correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

            # Merge all variable summaries and save the results to log file
            # summary_op = tf.merge_all_summaries()
            accuracy_op_train = tf.scalar_summary("Accuracy on Train", accuracy)
            summary_op_train = tf.merge_summary([cost_summary_op, accuracy_op_train])
            if eval_on_test:
                accuracy_op_test = tf.scalar_summary("Accuracy on Test", accuracy)
                summary_op_test = tf.merge_summary([accuracy_op_test])

            summary_writer = tf.train.SummaryWriter(self.log_dir + self.model_name, sess.graph)

            train_dict = {
                feature_data: self.training_predictors_tf.values,
                labels: self.training_classes_tf.values.reshape(len(self.training_classes_tf.values), self.num_classes)}

            if eval_on_test:
                test_dict = {
                    feature_data: self.test_predictors_tf.values,
                    labels: self.test_classes_tf.values.reshape(len(self.test_classes_tf.values), self.num_classes)}

            init = tf.initialize_all_variables()
            sess.run(init)

            for i in range(1, self.max_iteration):
                sess.run(training_op, feed_dict=train_dict)

                # Write summary to log
                if i % 100 == 0:
                    summary_str = sess.run(summary_op_train, feed_dict=train_dict)
                    summary_writer.add_summary(summary_str, i)
                    if eval_on_test:
                        summary_str = sess.run(summary_op_test, feed_dict=test_dict)
                        summary_writer.add_summary(summary_str, i)
                    summary_writer.flush()

                # Print current accuracy to console
                if i%5000 == 0:
                    print (i, sess.run(accuracy, feed_dict=train_dict))

            # Save trained parameters
            saver = tf.train.Saver()
            saver.save(sess, self.model_filename)
Author: kanoh-k | Project: pred225 | Lines of code: 60 | Source file: model.py


Example 10: build_eval_graph

    def build_eval_graph(self):
        # Keep track of the totals while running through the batch data
        self.total_loss = tf.Variable(0.0, trainable=False, collections=[])
        self.total_correct = tf.Variable(0.0, trainable=False, collections=[])
        self.example_count = tf.Variable(0.0, trainable=False, collections=[])

        # Calculates the means
        self.mean_loss = self.total_loss / self.example_count
        self.accuracy = self.total_correct / self.example_count

        # Operations to modify the stateful variables
        inc_total_loss = self.total_loss.assign_add(self.model.total_loss)
        inc_total_correct = self.total_correct.assign_add(
            tf.reduce_sum(tf.cast(self.model.correct_predictions, "float")))
        inc_example_count = self.example_count.assign_add(self.model.batch_size)

        # Operation to reset all the stateful vars. Should be called before starting a data set evaluation.
        with tf.control_dependencies(
                [self.total_loss.initializer, self.total_correct.initializer, self.example_count.initializer]):
            self.eval_reset = tf.no_op()

        # Operation to modify the stateful variables with data from one batch
        # Should be called for each batch in the evaluation set
        with tf.control_dependencies([inc_total_loss, inc_total_correct, inc_example_count]):
            self.eval_step = tf.no_op()

        # Summaries
        summary_mean_loss = tf.scalar_summary("mean_loss", self.mean_loss)
        summary_acc = tf.scalar_summary("accuracy", self.accuracy)
        self.summaries = tf.merge_summary([summary_mean_loss, summary_acc])
Author: alphawolfxiaoliu | Project: tf-models | Lines of code: 30 | Source file: rnn_classifier.py


Example 11: training

def training(cost, learning_rate_pl):
    
    # add scalar summary TODO
    """ Set up training operation
        - generate a summary to track cost in tensorboard
        - create gradient descent optimizer for all trainable variables

        The training op returned has to be called in sess.run()

    Args:
        cost: cost tensor from cost()
        learning_rate_pl: gradient descent learning rate, a PLACEHOLDER TO BE FED

    Returns:
        train_op: training op
    """
    with tf.name_scope('Training'):

        tf.scalar_summary('Mean cost', cost, name='Cost_summary')

        # create gradient descent optimizer
        optimizer = tf.train.AdamOptimizer(learning_rate_pl, name='Optimizer')

        # create global step variable to track global step: TODO
        global_step = tf.Variable(0, name='global_step', trainable=False)

        train_op = optimizer.minimize(cost, global_step=global_step, name='Train_OP')
        return train_op
Author: mingyue312 | Project: TensorFlowFinance | Lines of code: 28 | Source file: cnn.py


Example 12: __init__

    def __init__(self, config):
        self.config = config

        self.input = tf.placeholder('int32', [self.config.batch_size, config.max_seq_len], name='input')
        self.labels = tf.placeholder('int64', [self.config.batch_size], name='labels')
        self.labels_one_hot = tf.one_hot(indices=self.labels,
                                         depth=config.output_dim,
                                         on_value=1.0,
                                         off_value=0.0,
                                         axis=-1)

        self.gru = GRUCell(config.hidden_state_dim)

        embeddings_we = tf.get_variable('word_embeddings', initializer=tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0))
        self.emb = embed_input = tf.nn.embedding_lookup(embeddings_we, self.input)
        inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, config.max_seq_len, embed_input)]

        outputs, last_slu_state = tf.nn.rnn(
            cell=self.gru,
            inputs=inputs,
            dtype=tf.float32,)

        w_project = tf.get_variable('project2labels', initializer=tf.random_uniform([config.hidden_state_dim, config.output_dim], -1.0, 1.0))
        self.logits = logits_bo = tf.matmul(last_slu_state, w_project)
        tf.histogram_summary('logits', logits_bo)
        self.probabilities = tf.nn.softmax(logits_bo)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits_bo, self.labels_one_hot))
        self.predict = tf.nn.softmax(logits_bo)

        # TensorBoard
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.predict, 1), self.labels), 'float32'), name='accuracy')
        tf.scalar_summary('CCE loss', self.loss)
        tf.scalar_summary('Accuracy', self.accuracy)
        self.tb_info = tf.merge_all_summaries()
Author: vojtsek | Project: sds-tracker | Lines of code: 34 | Source file: fat_model.py


Example 13: loss

  def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables

    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell],
      whose last dimension splits into (num_classes, boxes_per_cell, 4 * boxes_per_cell)
      labels  : 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size]
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
      predict = predicts[i, :, :, :]
      label = labels[i, :, :]
      object_num = objects_num[i]
      nilboy = tf.ones([7,7,2])
      tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
      for j in range(4):
        loss[j] = loss[j] + tuple_results[2][j]
      nilboy = tuple_results[5]

    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)

    tf.scalar_summary('class_loss', loss[0]/self.batch_size)
    tf.scalar_summary('object_loss', loss[1]/self.batch_size)
    tf.scalar_summary('noobject_loss', loss[2]/self.batch_size)
    tf.scalar_summary('coord_loss', loss[3]/self.batch_size)
    tf.scalar_summary('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
Author: yyf013932 | Project: tensormsa | Lines of code: 33 | Source file: yolo_net.py


Example 14: get_config

def get_config():
    logger.auto_set_dir()

    data_train, data_test = get_data()
    step_per_epoch = data_train.size()

    lr = tf.train.exponential_decay(
        learning_rate=1e-3,
        global_step=get_global_step_var(),
        decay_steps=data_train.size() * 60,
        decay_rate=0.2, staircase=True, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    return TrainConfig(
        dataset=data_train,
        optimizer=tf.train.AdamOptimizer(lr),
        callbacks=Callbacks([
            StatPrinter(),
            ModelSaver(),
            InferenceRunner(data_test,
                [ScalarStats('cost'), ClassificationError()])
        ]),
        model=Model(),
        step_per_epoch=step_per_epoch,
        max_epoch=350,
    )
Author: amirstar | Project: tensorpack | Lines of code: 26 | Source file: svhn-digit-convnet.py


Example 15: add_evaluation_step

def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    Nothing.
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      # tf.argmax(result_tensor, 1) = return index of maximal value (= 1 in a 1-of-N encoding vector) in each row (axis = 1)
      # But we have more ones (indicating multiple labels) in one row of result_tensor due to the multi-label classification
      # correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
      #   tf.argmax(ground_truth_tensor, 1))

      # ground_truth is not a binary tensor, it contains the probabilities of each label = we need to tf.round() it
      # to acquire a binary tensor allowing comparison by tf.equal()
      # See: http://stackoverflow.com/questions/39219414/in-tensorflow-how-can-i-get-nonzero-values-and-their-indices-from-a-tensor-with

      correct_prediction = tf.equal(tf.round(result_tensor), ground_truth_tensor)
    with tf.name_scope('accuracy'):
      # Mean accuracy over all labels:
      # http://stackoverflow.com/questions/37746670/tensorflow-multi-label-accuracy-calculation
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', evaluation_step)
  return evaluation_step
Author: samhains | Project: Multi-label-Inception-net | Lines of code: 29 | Source file: retrain.py


Example 16: get_config

def get_config():
    basename = os.path.basename(__file__)
    logger.set_logger_dir(
        os.path.join('train_log', basename[:basename.rfind('.')]))

    ds = CharRNNData(param.corpus, 100000)
    ds = BatchData(ds, param.batch_size)
    step_per_epoch = ds.size()

    lr = tf.Variable(2e-3, trainable=False, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    return TrainConfig(
        dataset=ds,
        optimizer=tf.train.AdamOptimizer(lr),
        callbacks=Callbacks([
            StatPrinter(),
            ModelSaver(),
            #HumanHyperParamSetter('learning_rate', 'hyper.txt')
            ScheduledHyperParamSetter('learning_rate', [(25, 2e-4)])
        ]),
        model=Model(),
        step_per_epoch=step_per_epoch,
        max_epoch=50,
    )
Author: Jothecat | Project: tensorpack | Lines of code: 25 | Source file: char-rnn.py


Example 17: get_config

def get_config():
    # prepare dataset
    dataset_train = get_data('train')
    step_per_epoch = dataset_train.size()
    dataset_test = get_data('test')

    sess_config = get_default_sess_config(0.9)

    # warm up with small LR for 1 epoch
    lr = tf.Variable(0.01, trainable=False, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    return TrainConfig(
        dataset=dataset_train,
        optimizer=tf.train.MomentumOptimizer(lr, 0.9),
        callbacks=Callbacks([
            StatPrinter(),
            PeriodicSaver(),
            ValidationError(dataset_test, prefix='test'),
            ScheduledHyperParamSetter('learning_rate',
                                      [(1, 0.1), (82, 0.01), (123, 0.001), (300, 0.0001)])
        ]),
        session_config=sess_config,
        model=Model(n=18),
        step_per_epoch=step_per_epoch,
        max_epoch=500,
    )
Author: saifrahmed | Project: tensorpack | Lines of code: 27 | Source file: cifar10_resnet.py


Example 18: train

def train(total_loss, global_step, batch_size=BATCH_SIZE):
  number_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size

  decay_steps = int(number_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)

  tf.scalar_summary('learning_rate', lr)

  # with tf.control_dependencies([total_loss]):
  #   opt = tf.train.AdamOptimizer(lr)
  #   grads = opt.compute_gradients(total_loss)

  # #apply the gradients
  # apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # for grad, var in grads:
  #   if grad is not None:
  #     tf.histogram_summary(var.op.name + "/gradients", grad)

  # with tf.control_dependencies([apply_gradient_op]):
  #   train_op = tf.no_op(name="train")

  opt = tf.train.GradientDescentOptimizer(lr).minimize(total_loss, global_step=global_step)
  # grads = opt.compute_gradients(total_loss)

  return opt
Author: kingtaurus | Project: cs231n | Lines of code: 31 | Source file: cifar10_tensorflow_batch_queue.py


Example 19: build_graph

  def build_graph(self):
    """Build the graph for the full model."""
    opts = self._options
    # The training data. A text file.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram(filename=opts.train_data,
                                 batch_size=opts.batch_size,
                                 window_size=opts.window_size,
                                 min_count=opts.min_count,
                                 subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)
    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i
    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.scalar_summary("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)

    # Properly initialize all variables.
    tf.initialize_all_variables().run()

    self.saver = tf.train.Saver()
Author: debaratidas1994 | Project: tensorflow | Lines of code: 31 | Source file: word2vec.py


Example 20: train

    def train(self, total_loss):
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        losses = tf.get_collection('losses')
        loss_averages_op = loss_averages.apply(losses + [total_loss])

        for l in losses + [total_loss]:
            tf.scalar_summary(l.op.name + ' (raw)', l)

        # Apply gradients, and add histograms
        with tf.control_dependencies([loss_averages_op]):
            opt = tf.train.AdamOptimizer()
            grads = opt.compute_gradients(total_loss)
        apply_gradient_op = opt.apply_gradients(grads)
        for var in tf.trainable_variables():
            tf.histogram_summary(var.op.name, var)
        for grad, var in grads:
            if grad is not None:
                tf.histogram_summary(var.op.name + '/gradients', grad)

        # Track the moving averages of all trainable variables
        variable_averages = tf.train.ExponentialMovingAverage(Recognizer.MOVING_AVERAGE_DECAY)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())

        with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
            train_op = tf.no_op(name='train')
        return train_op
Author: wolfinwool | Project: tf-face-recognizer | Lines of code: 26 | Source file: recognizer.py



Note: The tensorflow.scalar_summary examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors, and distribution or use must follow each project's license. Please do not reproduce without permission.

