
Python tensorflow.merge_all_summaries Function Code Examples


This article collects typical usage examples of the tensorflow.merge_all_summaries function in Python. If you have been wondering what merge_all_summaries does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the merge_all_summaries function, sorted by popularity.
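Before diving in, here is a minimal, self-contained sketch of the pattern nearly all of these examples share: register summaries, merge them into a single op with tf.merge_all_summaries, then run that op and hand the serialized result to a SummaryWriter. This is a sketch assuming the pre-1.0 TensorFlow API used throughout this page (in TensorFlow 1.0+ the same calls became tf.summary.scalar, tf.summary.merge_all, and tf.summary.FileWriter); the log directory is a hypothetical placeholder.

import tensorflow as tf

# Minimal sketch of the merge_all_summaries workflow (TF 0.x API).
x = tf.Variable(3.0, name="x")
loss = tf.square(x, name="loss")
tf.scalar_summary("loss", loss)  # registers the summary in the default collection

summary_op = tf.merge_all_summaries()              # one op covering every registered summary
writer = tf.train.SummaryWriter("/tmp/demo_logs")  # hypothetical log directory

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    summary_str = sess.run(summary_op)             # serialized Summary protobuf
    writer.add_summary(summary_str, global_step=0)
    writer.flush()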

Example 1: __init__

  def __init__(self,
      learning_rate,
      noise_level,
      input_layer_size,
      class_count,
      encoder_layer_definitions,
      denoising_cost_multipliers):
    assert class_count == encoder_layer_definitions[-1][0]

    self.learning_rate = learning_rate
    self.denoising_cost_multipliers = denoising_cost_multipliers

    self.placeholders = _Placeholders(input_layer_size, class_count)

    self.output = _ForwardPass(self.placeholders,
        noise_level=noise_level,
        encoder_layer_definitions=encoder_layer_definitions)

    self.accuracy_measure = self._accuracy_measure(
        self.placeholders, self.output)
    self.supervised_train_step = self._supervised_train_step(
        self.placeholders, self.output)
    self.unsupervised_train_step = self._unsupervised_train_step(
        self.placeholders, self.output)

    self.unsupervised_summaries = tf.merge_all_summaries("unsupervised")
    self.supervised_summaries = tf.merge_all_summaries("supervised")
    self.test_summaries = tf.merge_all_summaries("test")

    self.saver = tf.train.Saver()
Developer: tarvaina | Project: tensorflow-ladder | Lines: 30 | Source: ladder_network.py
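Example 1 is notable for passing a collection key to tf.merge_all_summaries, which merges only the summaries registered under that key rather than the default tf.GraphKeys.SUMMARIES collection. Here is a short sketch of how summaries end up in such named collections; the collection names mirror Example 1, and the loss tensors are illustrative placeholders.

import tensorflow as tf

# Sketch: route summaries into named collections via the collections argument,
# then merge each collection independently, as Example 1 does.
sup_loss = tf.constant(0.1)
unsup_loss = tf.constant(0.2)
tf.scalar_summary("supervised_loss", sup_loss, collections=["supervised"])
tf.scalar_summary("unsupervised_loss", unsup_loss, collections=["unsupervised"])

supervised_summaries = tf.merge_all_summaries("supervised")      # merges only that collection
unsupervised_summaries = tf.merge_all_summaries("unsupervised")  # independent merge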


Example 2: _setup_summary_writer

 def _setup_summary_writer(self, logdir):
     """Sets up the summary writer to prepare for later optional visualization."""
     # Create summary to monitor loss
     tf.scalar_summary("loss", self._model_loss)
     # Set up a single operator to merge all the summaries
     tf.merge_all_summaries()
     # Set up summary writer to the specified log directory
     self._summary_writer = tf.train.SummaryWriter(os.path.join(logdir,
      datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')),
       graph_def=self._session.graph_def)
Developer: priyamuurali | Project: skflow | Lines: 10 | Source: __init__.py


Example 3: _set_model

    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            layers = self.model.layers
            for layer in layers:
                if hasattr(layer, 'W'):
                    tf.histogram_summary('{}_W'.format(layer), layer.W)
                if hasattr(layer, 'b'):
                    tf.histogram_summary('{}_b'.format(layer), layer.b)
                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer),
                                         layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
Developer: AdrianJohnston | Project: keras | Lines: 26 | Source: callbacks.py


Example 4: __init__

    def __init__(self, optimizer, categories, num_of_terms, num_of_hidden_nodes):
        self.optimizer           = optimizer
        self.categories          = categories
        self.num_of_categories   = len(self.categories)
        self.num_of_terms        = num_of_terms
        self.num_of_hidden_nodes = num_of_hidden_nodes

        self.input_ph      = tf.placeholder(tf.float32, [None, self.num_of_terms], name="input")
        self.supervisor_ph = tf.placeholder(tf.float32, [None, self.num_of_categories], name="supervisor")

        with tf.name_scope("inference") as scope:
            weight1_var    = tf.Variable(tf.truncated_normal([self.num_of_terms, self.num_of_hidden_nodes], stddev=0.1), name="weight1")
            weight2_var    = tf.Variable(tf.truncated_normal([self.num_of_hidden_nodes, self.num_of_categories], stddev=0.1), name="weight2")
            bias1_var      = tf.Variable(tf.zeros([self.num_of_hidden_nodes]), name="bias1")
            bias2_var      = tf.Variable(tf.zeros([self.num_of_categories]), name="bias2")
            hidden_op      = tf.nn.relu(tf.matmul(self.input_ph, weight1_var) + bias1_var)
            self.output_op = tf.nn.softmax(tf.matmul(hidden_op, weight2_var) + bias2_var)

        with tf.name_scope("loss") as scope:
            cross_entropy = -tf.reduce_sum(self.supervisor_ph * tf.log(self.output_op))
            l2_sqr        = tf.nn.l2_loss(weight1_var) + tf.nn.l2_loss(weight2_var)
            lambda_2      = 0.01
            self.loss_op  = cross_entropy + lambda_2 * l2_sqr
            tf.scalar_summary("loss", self.loss_op)

        with tf.name_scope("training") as scope:
            self.training_op = self.optimizer.minimize(self.loss_op)

        with tf.name_scope("accuracy") as scope:
            correct_prediction = tf.equal(tf.argmax(self.output_op, 1), tf.argmax(self.supervisor_ph, 1))
            self.accuracy_op   = tf.reduce_mean(tf.cast(correct_prediction, "float"))
            tf.scalar_summary("accuracy", self.accuracy_op)

        self.summary_op = tf.merge_all_summaries()
Developer: nayutaya | Project: 20160228-gdg-kobe | Lines: 34 | Source: mlp.py


Example 5: evaluate

def evaluate (tfrecord_file_paths, theme):
    eval_dir = 'workspace/{}/eval'.format(theme)
    with tf.Graph().as_default() as g:
        images, labels = distorted_inputs(tfrecord_file_paths=tfrecord_file_paths)
        logits = cifar10.inference(tf.image.resize_images(images, cifar10.IMAGE_SIZE, cifar10.IMAGE_SIZE))

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        variable_averages = tf.train.ExponentialMovingAverage(cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = {}

        for v in tf.all_variables():
            if v in tf.trainable_variables():
                restore_name = variable_averages.average_name(v)
            else:
                restore_name = v.op.name
            variables_to_restore[restore_name] = v

        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(eval_dir, g)

        eval_once(theme, saver, summary_writer, top_k_op, summary_op)
Developer: daiz713 | Project: tfPhotoClassifier | Lines: 26 | Source: eval.py


Example 6: evaluate

def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels for CIFAR-10.
        images, labels = inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = svhn.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        top_k_predict_op = tf.argmax(logits, 1)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(svhn.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def)

        while True:
            eval_once(saver, summary_writer, top_k_op, top_k_predict_op, summary_op, images)
            break
Developer: jkschin | Project: svhn | Lines: 28 | Source: svhn_eval.py


Example 7: time_tensorflow_run

def time_tensorflow_run(session, target, info_string):
  num_steps_burn_in = 10
  total_duration = 0.0
  total_duration_squared = 0.0
  for i in xrange(FLAGS.num_batches + num_steps_burn_in):
    run_options = None
    run_metadata = None
    if FLAGS.enable_trace and i == num_steps_burn_in - 1:
      run_options = config_pb2.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      merged = tf.merge_all_summaries()
      writer = tf.train.SummaryWriter('/home/minjie/tmp/pipeline', session.graph)

    # Run session
    start_time = time.time()
    _ = session.run(target, options=run_options, run_metadata=run_metadata)
    duration = time.time() - start_time

    if FLAGS.enable_trace and i == num_steps_burn_in - 1:
      tl = tf.python.client.timeline.Timeline(run_metadata.step_stats)
      ctf = tl.generate_chrome_trace_format()
      with open('tf_trace.ctf', 'w') as f:
        f.write(ctf)

    if i >= num_steps_burn_in:
      if not i % 10:
        print ('%s: step %d, duration = %.3f speed = %.3f images/sec' %
               (datetime.now(), i - num_steps_burn_in, duration, FLAGS.batch_size / duration))
      total_duration += duration
      total_duration_squared += duration * duration
  mn = total_duration / FLAGS.num_batches
  vr = total_duration_squared / FLAGS.num_batches - mn * mn
  sd = math.sqrt(vr)
  print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
         (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
Developer: houcy | Project: models | Lines: 35 | Source: pipelining.py


Example 8: evaluate

def evaluate(eval_data, model_path, global_step ):
  """Eval CIFAR-100 prediction performance."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-100
    images, labels = data_utils.inputs(eval_data=eval_data, data_dir = FLAGS.data_dir, batch_size=FLAGS.batch_size) #Get batches

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = inference(images)   #Run predictions on the images
    logits_norm = tf.nn.softmax(logits)   #Check the softmax of the images, this should normalize our scores for predictions
    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits_norm, labels, 1) #Get the highest ranked logit_norms

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    eval_once(eval_data, model_path, global_step, saver, summary_writer, top_k_op, summary_op)
Developer: netzo92 | Project: cs291k-FP | Lines: 25 | Source: conv_net.py


Example 9: evaluate

def evaluate():
    with tf.Graph().as_default():
        # Load the test data
        images, labels = data_inputs.inputs('data/train_kirin_norm_32.tfrecords')
        logits = model.inference(images)

        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        
        variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
        variables_to_restore = {}
        for v in tf.all_variables():
            if v in tf.trainable_variables():
                restore_name = variable_averages.average_name(v)
            else:
                restore_name = v.op.name
            variables_to_restore[restore_name] = v
        saver = tf.train.Saver(variables_to_restore)
        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Developer: pmnyc | Project: Machine_Learning_Test_Repository | Lines: 27 | Source: eval.py


Example 10: __init__

    def __init__(self,
                 state_dim,
                 action_dim,
                 batch_size=64,
                 gamma=0.9,
                 buffer_size=1024 * 1024,
                 initial_epsilon=0.5,
                 final_epsilon=0.01,
                 logdir='/data/log'):

        self.state_dim = state_dim
        self.action_dim = action_dim
        self.replay_buffer = deque()
        self.time_step = 0
        self.epsilon = self.initial_epsilon = initial_epsilon
        self.final_epsilon = final_epsilon

        self.batch_size = batch_size
        self.gamma = gamma
        self.buffer_size = buffer_size

        self.create_Q_network()
        self.create_training_method()

        self.reward = tf.placeholder(tf.float32)
        tf.scalar_summary("reward", self.reward)
        self.merged = tf.merge_all_summaries()

        self.session = tf.InteractiveSession()

        self.summary_writer = tf.train.SummaryWriter(logdir, self.session.graph)
        self.session.run(tf.initialize_all_variables())
Developer: witwolf | Project: RL-DQN | Lines: 32 | Source: dqn.py


Example 11: drawGraph

    def drawGraph(self, n_row, n_latent, n_col):
        with tf.name_scope('matDecomp'):
            self._p = tf.placeholder(tf.float32, shape=[None, n_col])
            self._c = tf.placeholder(tf.float32, shape=[None, n_col])
            self._lambda = tf.placeholder(tf.float32)
            self._index = tf.placeholder(tf.float32, shape=[None, n_row])
            self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
            self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
            self._h = tf.matmul(tf.matmul(self._index, self._A), self._B) 
            
            weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
            self._weighted_loss = weighted_loss
            l2_A = tf.reduce_sum(tf.square(self._A))
            l2_B = tf.reduce_sum(tf.square(self._B))
            n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
            l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
            reg_term = tf.mul(self._lambda, l2)
            self._loss = tf.add(weighted_loss, reg_term)
            
            self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
            one = tf.constant(1, tf.float32)
            pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
            cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
            self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

            self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
            tf.scalar_summary('training_weighted_loss_l2', self._loss)
            tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
            merged = tf.merge_all_summaries()
Developer: cning | Project: ehc | Lines: 29 | Source: model.py


Example 12: evaluate

def evaluate():
    with tf.Graph().as_default():
        eval_data = FLAGS.eval_data == 'test'
        images, labels = model.inputs(eval_data=eval_data)

        logits = model.inference(images)

        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        variable_averages = tf.train.ExponentialMovingAverage(
            model.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                                graph_def=graph_def)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Developer: rahuljayaraman | Project: slide_extraction | Lines: 25 | Source: model_eval.py


Example 13: main

def main(_):
    ps_hosts = FLAGS.ps_hosts.split(",")
    worker_hosts = FLAGS.worker_hosts.split(",")

    # Create a cluster from the parameter server and worker hosts.
    cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})

    # Create and start a server for the local task.
    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)

    if FLAGS.job_name == "ps":
        server.join()
    elif FLAGS.job_name == "worker":
        # Assigns ops to the local worker by default.
        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % FLAGS.task_index,
                cluster=cluster)):

            # Build model...
            x = tf.placeholder("float", [10, 10], name="x")
            y = tf.placeholder("float", [10, 1], name="y")
            initial_w = np.zeros((10, 1))
            w = tf.Variable(initial_w, name="w", dtype="float32")
            loss = tf.pow(tf.add(y,-tf.matmul(x,w)),2,name="loss")
            global_step = tf.Variable(0)

            saver = tf.train.Saver()
            summary_op = tf.merge_all_summaries()
            init_op = tf.initialize_all_variables()

        # Create a "supervisor", which oversees the training process.
        sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                                 logdir="/tmp/train_logs",
                                 init_op=init_op,
                                 summary_op=summary_op,
                                 saver=saver,
                                 global_step=global_step,
                                 save_model_secs=600)

        # The supervisor takes care of session initialization, restoring from
        # a checkpoint, and closing when done or an error occurs.
        with sv.managed_session(server.target) as sess:
            # Loop until the supervisor shuts down or 1000000 steps have completed.
            step = 0
            while not sv.should_stop() and step < 1000000:
                # Run a training step asynchronously.
                # See `tf.train.SyncReplicasOptimizer` for additional details on how to
                # perform *synchronous* training.
                #_, step = sess.run([loss, global_step])
                _, step = sess.run([loss, global_step],
                {
                  x: np.random.rand(10,10),
                  y: np.random.rand(10).reshape(-1,1)
                })
                print("job_name: %s; task_index: %s; step: %d" % (FLAGS.job_name,FLAGS.task_index,step))

        # Ask for all the services to stop.
        sv.stop()
Developer: apmanikandan | Project: Neuralnet | Lines: 60 | Source: Dist.py


Example 14: _initialize_tf_utilities_and_ops

    def _initialize_tf_utilities_and_ops(self, restore_previous_model):
        """Initialize TensorFlow operations.

        tf operations: summaries, init operations, saver, summary_writer.
        Restore a previously trained model if the flag restore_previous_model
            is true.
        :param restore_previous_model:
                    if true, a previous trained model
                    with the same name of this model is restored from disk
                    to continue training.
        """
        self.tf_merged_summaries = tf.merge_all_summaries()
        init_op = tf.initialize_all_variables()
        self.tf_saver = tf.train.Saver()

        self.tf_session.run(init_op)

        if restore_previous_model:
            print('Restoring previous model from %s' % self.model_path)
            self.tf_saver.restore(self.tf_session, self.model_path)

        # Retrieve run identifier
        run_id = 0
        for e in os.listdir(self.tf_summary_dir):
            if e[:3] == 'run':
                r = int(e[3:])
                if r > run_id:
                    run_id = r
        run_id += 1
        run_dir = os.path.join(self.tf_summary_dir, 'run' + str(run_id))
        print('Tensorboard logs dir for this run is %s' % (run_dir))

        self.tf_summary_writer = tf.train.SummaryWriter(
            run_dir, self.tf_session.graph)
Developer: Aldor007 | Project: atabox-server | Lines: 34 | Source: model.py


Example 15: evaluate

def evaluate(dataset, model, summary_path, read_checkpoint_path):
    with tf.Graph().as_default():
        # input and evaluation procedure
        images, true_labels = dataset.evaluation_inputs()
        predictions = model.inference(images, dataset.num_classes, False)
        top_k_op = _in_top_k(predictions, true_labels)

        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=None)

        test_err = tf.placeholder(tf.float32, shape=[], name='test_err')
        # FIXME test error averaged starts at 0
        test_err_avg_op = _add_test_error_summary(test_err)

        with tf.control_dependencies([test_err_avg_op]):
            summary_op = tf.merge_all_summaries()
            summary_writer = tf.train.SummaryWriter(summary_path, tf.get_default_graph().as_graph_def())

        with tf.Session() as sess:

            sess.run(tf.initialize_all_variables())

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            last = None
            while True:
                last = _eval_once(sess, coord, last, saver, read_checkpoint_path, summary_writer, top_k_op, summary_op,
                                  test_err)
                if FLAGS.run_once or last == FLAGS.training_steps:
                    break
                time.sleep(FLAGS.eval_interval_secs)

            coord.request_stop()
            coord.join(threads)
Developer: mackcmillion | Project: reslearn | Lines: 34 | Source: evaluate.py


Example 16: run_training

def run_training():
  data_sets = data_mnist.read_data_sets()
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels_placeholder)
    train_op = mnist.training(loss, FLAGS.learning_rate)
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time

      if step % 100 == 0:
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        do_eval(sess,eval_correct, images_placeholder, labels_placeholder, data_sets.train)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
Developer: CosmosShadow | Project: MLPythonLib | Lines: 35 | Source: fully_connected_feed.py


Example 17: __init__

    def __init__(self, config):
        self.config = config

        self.input = tf.placeholder('int32', [self.config.batch_size, config.max_seq_len], name='input')
        self.labels = tf.placeholder('int64', [self.config.batch_size], name='labels')
        self.labels_one_hot = tf.one_hot(indices=self.labels,
                                         depth=config.output_dim,
                                         on_value=1.0,
                                         off_value=0.0,
                                         axis=-1)

        self.gru = GRUCell(config.hidden_state_dim)

        embeddings_we = tf.get_variable('word_embeddings', initializer=tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0))
        self.emb = embed_input = tf.nn.embedding_lookup(embeddings_we, self.input)
        inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, config.max_seq_len, embed_input)]

        outputs, last_slu_state = tf.nn.rnn(
            cell=self.gru,
            inputs=inputs,
            dtype=tf.float32,)

        w_project = tf.get_variable('project2labels', initializer=tf.random_uniform([config.hidden_state_dim, config.output_dim], -1.0, 1.0))
        self.logits = logits_bo = tf.matmul(last_slu_state, w_project)
        tf.histogram_summary('logits', logits_bo)
        self.probabilities = tf.nn.softmax(logits_bo)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits_bo, self.labels_one_hot))
        self.predict = tf.nn.softmax(logits_bo)

        # TensorBoard
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.predict, 1), self.labels), 'float32'), name='accuracy')
        tf.scalar_summary('CCE loss', self.loss)
        tf.scalar_summary('Accuracy', self.accuracy)
        self.tb_info = tf.merge_all_summaries()
Developer: vojtsek | Project: sds-tracker | Lines: 34 | Source: fat_model.py


Example 18: start_session

    def start_session(self):
        """
        Creates the session.

        """
        self.input_layer_mats = ["W_input", "b_input"]
        self.hidden_layer_mats = []
        for i in range(self.num_hidden):
            self.hidden_layer_mats.append("W" + str(i))
            self.hidden_layer_mats.append("b" + str(i))
        self.output_layer_mats = ["W_output", "b_output"]

        self.weight_mats = self.input_layer_mats + self.hidden_layer_mats + self.output_layer_mats

        with tf.variable_scope("network") as scope:
            self.create_model_trainable()

        with tf.variable_scope("target") as scope:
            self.create_model_target()

        session = tf.Session()

        # TensorBoard init
        if self.tensorboard:
            self.merged_summaries = tf.merge_all_summaries()
            now = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
            self.summary_writer = tf.train.SummaryWriter('./outputs/' + now + '/', session.graph)
        else:
            self.summary_writer = None

        init = tf.initialize_all_variables()

        session.run(init)

        return session
Developer: AMairesse | Project: dqn | Lines: 35 | Source: cnn_target.py


Example 19: __init__

 def __init__(self, log_dir='./logs', max_queue=10, flush_secs=120):
     self.log_dir = log_dir
     self.merged = tf.merge_all_summaries()
     self.writer = tf.train.SummaryWriter(self.log_dir,
                                          max_queue=max_queue,
                                          flush_secs=flush_secs,
                                          graph_def=None)
Developer: developeralgo8888 | Project: zipline-tensorboard | Lines: 7 | Source: tensorboard.py


Example 20: run_training

    def run_training(self,sess, eval_correct, train_op, loss):

        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(self.train_dir, graph=sess.graph)
        saver = tf.train.Saver()

        feed_dict = self.fill_feed_dict(self.train_dataset, self.train_labels, 0)

        for step in range(self.num_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss],
                                       feed_dict=feed_dict)

            feed_dict = self.fill_feed_dict(self.train_dataset, self.train_labels, step+1)

            duration = time.time() - start_time
            if step % 5000 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)

            if (step + 1) % 10000 == 0 or (step + 1) == self.num_steps:
                saver.save(sess, self.train_dir, global_step=step)
                print('Training Data Eval:')
                self.do_eval(sess, eval_correct,
                    feed_dict[self.images_placeholder], feed_dict[self.labels_placeholder])
                print('Validation Data Eval:')
                self.do_eval(sess, eval_correct, self.valid_dataset, self.valid_labels)
Developer: andreslechuga | Project: DeepLearning | Lines: 28 | Source: Assignment3.py



Note: The tensorflow.merge_all_summaries examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors. Consult each project's license before redistributing or reusing the code, and do not repost without permission.

