
Python tensorflow.image_summary Function Code Examples


This article collects typical usage examples of the tensorflow.image_summary function in Python. If you are wondering what image_summary does, how to call it, or what real-world uses look like, the curated code examples below may help.



The following presents 20 code examples of the image_summary function, sorted by popularity by default.
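
Before diving into the examples, here is a minimal sketch of the call itself. tf.image_summary belongs to the pre-1.0 TensorFlow API (it was renamed to tf.summary.image in TensorFlow 1.0) and expects a tag string plus a 4-D tensor of shape [batch, height, width, channels], where channels must be 1, 3, or 4. The placeholder shape and tag name below are illustrative assumptions, not taken from any example in this article.

import tensorflow as tf

# A batch of grayscale 28x28 images; the shape and names are illustrative.
images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')

# Write up to max_images images per step to TensorBoard under the tag 'inputs'.
# (TensorFlow <= 0.12 API; from 1.0 onward use tf.summary.image instead.)
tf.image_summary('inputs', images, max_images=3)

# Merge all summaries so a single op can be evaluated and passed to a SummaryWriter.
summary_op = tf.merge_all_summaries()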

Example 1: nerve_inputs

def nerve_inputs(batch_size):
  """ Construct nerve input net.
  Args:
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4-D tensor of shape [batch_size, 420, 580, channels].
    masks: Masks. 4-D tensor of shape [batch_size, 420, 580, channels].
  """

  shape = (420,580)

  tfrecord_filename = glb('../data/tfrecords/*') 
  print(tfrecord_filename)
  
  filename_queue = tf.train.string_input_producer(tfrecord_filename) 

  image, mask = read_data(filename_queue, shape)

  images, masks = _generate_image_label_batch(image, mask, batch_size)
 
  # display in tf summary page 
  tf.image_summary('images', images)
  tf.image_summary('mask', masks)

  return images, masks 
Author: loliverhennigh | Project: ultrasound-nerve-segmentation-in-tensorflow | Lines: 25 | Source: nerve_input.py


Example 2: _conv

def _conv(inpOp, kH, kW, nOut, dH=1, dW=1, relu=True):
    global conv_counter
    global parameters
    name = 'conv' + str(conv_counter)
    conv_counter += 1
    with tf.name_scope(name) as scope:
        nIn = int(inpOp.get_shape()[-1])
        stddev = 5e-3
        kernel = tf.Variable(tf.truncated_normal([kH, kW, nIn, nOut],
                                                 dtype=tf.float32,
                                                 stddev=(kH*kW*nIn)**0.5*stddev), name='weights')
        
        conv = tf.nn.conv2d(inpOp, kernel, [1, 1, 1, 1],
                         padding="SAME")

        biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if relu:
          bias = tf.nn.relu(bias, name=scope)
        #parameters += [kernel, biases]
        #bias = tf.Print(bias, [tf.sqrt(tf.reduce_mean(tf.square(inpOp - tf.reduce_mean(inpOp))))], message=kernel.name)
        tf.histogram_summary(scope+"/output", bias)
        tf.image_summary(scope+"/output", bias[:,:,:,0:3])
        tf.image_summary(scope+"/kernel_weight", tf.expand_dims(kernel[:,:,0:3,0], 0))
        # tf.image_summary(scope+"/point_weight", pointwise_filter)
        
        return bias
Author: Hello1024 | Project: tf-gen | Lines: 28 | Source: utils.py


Example 3: read_image_data

def read_image_data():
    dirname, filename = os.path.split(os.path.abspath(__file__))
    #Create a list of filenames
    #path = '/home/david/datasets/fs_ready/Aaron_Eckhart/'
    path = '/home/david/datasets/fs_ready/Zooey_Deschanel/'
    jpeg_files = glob.glob(os.path.join(path, '*.jpg'))
    #Create a queue that produces the filenames to read
    filename_queue = tf.train.string_input_producer(jpeg_files)
    #Create a reader for the filequeue
    reader = tf.WholeFileReader()
    #Read in the files
    key, value = reader.read(filename_queue)
    #Convert the Tensor(of type string) to representing the Tensor of type uint8
    # and shape [height, width, channels] representing the images
    images = tf.image.decode_jpeg(value, channels=3)
    #convert images to floats and attach image summary
    float_images = tf.expand_dims(tf.cast(images, tf.float32),0)
    tf.image_summary('images', float_images)
    
    #Create session
    sess = tf.Session()
    summary_op = tf.merge_all_summaries()
    sess.run(tf.initialize_all_variables())
    #Write summary
    summary_writer = tf.train.SummaryWriter(dirname+'/log/', graph_def=sess.graph_def)
    tf.train.start_queue_runners(sess=sess)
    for i in xrange(10):
        summary_str, float_image = sess.run([summary_op, float_images])
        print (float_image.shape)
        summary_writer.add_summary(summary_str)
    #Close session
    sess.close()
Author: 21hub | Project: facenet | Lines: 32 | Source: faceread.py


Example 4: conv2d

def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
           summary_tag=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = intprod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = intprod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer,
                            collections=collections)

        if summary_tag is not None:
            tf.image_summary(summary_tag,
                             tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
                                          [2, 0, 1, 3]),
                             max_images=10)

        return tf.nn.conv2d(x, w, stride_shape, pad) + b
Author: Neithardt-zn | Project: homework | Lines: 28 | Source: tf_util.py


Example 5: inputs

def inputs(files, distort=False):
    fqueue = tf.train.string_input_producer(files)
    reader = tf.TFRecordReader()
    key, value = reader.read(fqueue)
    features = tf.parse_single_example(value, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string),
    })
    image = tf.image.decode_jpeg(features['image_raw'], channels=3)
    image = tf.cast(image, tf.float32)

    if distort:
        cropsize = random.randint(INPUT_SIZE, IMAGE_SIZE)
        image = tf.image.random_crop(image, [cropsize, cropsize])
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_brightness(image, max_delta=0.63)
        image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
        image = tf.image.random_hue(image, max_delta=0.02)
        image = tf.image.random_saturation(image, lower=0.8, upper=1.2)
    else:
        image = tf.image.random_crop(image, [IMAGE_SIZE, IMAGE_SIZE])
        image = tf.image.resize_image_with_crop_or_pad(image, INPUT_SIZE, INPUT_SIZE)

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(FLAGS.num_examples_per_epoch_for_train * min_fraction_of_examples_in_queue)
    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )
    images = tf.image.resize_images(images, INPUT_SIZE, INPUT_SIZE)
    tf.image_summary('images', images)
    return images, labels
Author: nyakosuta | Project: tf-classifier | Lines: 34 | Source: v2.py


Example 6: inputs

def inputs(eval_data, data_dir, batch_size):
  filename = os.path.join(data_dir, TEST_FILE)
  filename_queue = tf.train.string_input_producer([filename])
  image, label = read_and_decode(filename_queue)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  print ("THIS",image.get_shape)
  
  resized_image = tf.image.resize_images(image, height, width)
  print (resized_image.get_shape)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(resized_image)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_EVAL *
                           min_fraction_of_examples_in_queue)

  images, label_batch = tf.train.batch(
      [float_image, label],
      batch_size=batch_size,
      num_threads=1,
      capacity=min_queue_examples + 3 * batch_size)

  tf.image_summary('images', images)
  return images, tf.reshape(label_batch, [batch_size])
Author: jkschin | Project: tf_code | Lines: 27 | Source: cnn_input.py


Example 7: distorted_inputs

def distorted_inputs (tfrecord_file_paths=[]):
    fqueue = tf.train.string_input_producer(tfrecord_file_paths)
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(fqueue)
    features = tf.parse_single_example(serialized_example, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string)
    })
    image = tf.image.decode_jpeg(features['image'], channels=size['depth'])
    image = tf.cast(image, tf.float32)
    image.set_shape([size['width'], size['height'], size['depth']])

    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL * min_fraction_of_examples_in_queue)

    images, labels = tf.train.shuffle_batch(
        [tf.image.per_image_whitening(image), tf.cast(features['label'], tf.int32)],
        batch_size=BATCH_SIZE,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples
    )

    images = tf.image.resize_images(images, size['input_width'], size['input_height'])
    tf.image_summary('images', images)
    return images, labels
Author: daiz713 | Project: tfPhotoClassifier | Lines: 25 | Source: eval.py


Example 8: _multichannel_image_summary

def _multichannel_image_summary(name, images, perm=[0, 3, 1, 2], max_summary_images=16):
    # Rescale pixel values to [0, 255]: (images - min) / (max - min) * 255.
    _min = tf.reduce_min(images)
    _max = tf.reduce_max(images)
    _ = tf.mul(tf.div(tf.sub(images, _min), tf.sub(_max, _min)), 255.0)
    # Move channels in front of height/width, then unstack every channel of
    # every batch element into its own single-channel summary image.
    _ = tf.transpose(_, perm=perm)
    shape = _.get_shape().as_list()
    tf.image_summary(name,
                     tf.reshape(_, [reduce(lambda x, y: x * y, shape) / (shape[3] * shape[2]),
                                    shape[2], shape[3], 1]),
                     max_images=max_summary_images)
Author: wbaek | Project: tensorflow-tutorials | Lines: 7 | Source: helper.py


Example 9: _generate_image_and_label_batch

def _generate_image_and_label_batch(image, label, min_queue_examples):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [IMAGE_SIZE, IMAGE_SIZE, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'FLAGS.batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=FLAGS.batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * FLAGS.batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [FLAGS.batch_size])
Author: bicimsiz | Project: tensorflow | Lines: 27 | Source: cifar10.py


Example 10: _generate_image_and_label_batch

def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.
  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.
  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  return images, tf.reshape(label_batch, [batch_size])
Author: chuckaschultz | Project: CIFAR10 | Lines: 35 | Source: cifar10_input.py


Example 11: model

    def model(self):
        """
        Define the model
        """
        # Reshape the input to [batch_size, dims_in[0], dims_in[1], dims_in[2]]
        x_image = tf.reshape(self.input, [-1, self.dims_in[0], self.dims_in[1], self.dims_in[2]],
                             name='x_input_reshaped')

        # Apply image resize
        x_image_upscale = tf.image.resize_bilinear(x_image, np.array([self.dims_out[0],
                                          self.dims_out[1]]), align_corners=None, name='x_input_upscale')

        self.x_input_upscale = x_image_upscale
        # Dump input image out
        tf.image_summary('x_upscale', x_image_upscale)

        # Model convolutions
        conv_1 = ops.conv2d(x_image_upscale, output_dim=8, k_h=5, k_w=5, d_h=1, d_w=1, name="conv_1")
        relu_1 = tf.nn.relu(conv_1)

        conv_2 = ops.conv2d(relu_1, output_dim=4, k_h=3, k_w=3, d_h=1, d_w=1, name="conv_2")
        relu_2 = tf.nn.relu(conv_2)

        conv_3 = ops.conv2d(relu_2, output_dim=1, k_h=1, k_w=1, d_h=1, d_w=1, name="conv_3")
        relu_3 = tf.nn.relu(conv_3)

        conv_4 = ops.conv2d(relu_3, output_dim=1, k_h=3, k_w=3, d_h=1, d_w=1, name="conv_4")

        predict = tf.reshape(conv_4, [-1, self.dims_out[0], self.dims_out[1], self.dims_out[2]], name='predict')

        # Dump prediction out
        tf.image_summary('predict', predict)
        return predict
Author: shohad25 | Project: thesis | Lines: 33 | Source: k_space_super_resolution.py


Example 12: _generate_image_and_label_batch

def _generate_image_and_label_batch(image, label, filename, min_queue_examples,
                                    batch_size, shuffle):

    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    capacity = min_queue_examples + 3 * batch_size

    if shuffle:
        images, label_batch, filename = tf.train.shuffle_batch(
            [image, label, filename],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=capacity,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch, filename = tf.train.batch(
            [image, label, filename],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=capacity)

    # Display the training images in the visualizer.
    tf.image_summary('image', images, max_images = 100)

    labels = tf.reshape(label_batch, [batch_size, NUM_CLASS])
    return images, labels, filename
Author: kenmaz | Project: momo_mind | Lines: 27 | Source: mcz_input.py


Example 13: _generate_image_and_label_batch

def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
    """ generate a batch of images and labels.

    Args:
        image: the trained image.
        label: label correspond to the image.
        min_queue_examples: the least examples int the example's queue.
        batch_size: the size of a batch.
        shuffle: whether or not to shuffle the examples.

    Returns:
        A batch of examples including images and the corresponding label.
    """
    num_preprocess_threads = 16
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)

    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, label_batch
Author: ttfjya | Project: image_desc_vector | Lines: 32 | Source: image_input.py


Example 14: preprocess

 def preprocess(self):
     with tf.name_scope('input'):
         x = tf.placeholder(tf.float32, [None, 784], name='x-input')
         image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
         tf.image_summary('input', image_shaped_input, max_images=100)
         y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
         return (x, y_)
Author: mwalton | Project: deep-q-learning | Lines: 7 | Source: test_cnn.py


Example 15: _deconv

def _deconv(inpOp, kH, kW, nOut, dH=1, dW=1, relu=True, name=None):
    global deconv_counter
    global parameters
    if not name:
      name = 'deconv' + str(deconv_counter)
    deconv_counter += 1
    with tf.variable_scope(name) as scope:
        nIn = int(inpOp.get_shape()[-1])
        in_shape = inpOp.get_shape()
        stddev = 1e-3
        kernel = tf.get_variable('weights',[kH, kW, nOut, nIn], initializer=tf.random_normal_initializer(stddev=(kH*kW*nIn)**0.5*stddev))
        
        conv = tf.nn.deconv2d(inpOp, kernel, [int(in_shape[0]),int(in_shape[1]),int(in_shape[2]),nOut], [1, 1, 1, 1],
                         padding="SAME")
                         
        biases = tf.get_variable('biases', [nOut], initializer=tf.constant_initializer(value=0.0))
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        if relu:
          bias = tf.nn.relu(bias, name='relu')
        #parameters += [kernel, biases]
        #bias = tf.Print(bias, [tf.sqrt(tf.reduce_mean(tf.square(inpOp - tf.reduce_mean(inpOp))))], message=kernel.name)
        tf.histogram_summary(bias.name+"/output", bias)
        tf.image_summary(bias.name+"/output", bias[:,:,:,0:3])
        #tf.image_summary(scope+"/depth_weight", depthwise_filter)
        # tf.image_summary(scope+"/point_weight", pointwise_filter)
        
        return bias
Author: Hello1024 | Project: tf-gen | Lines: 27 | Source: utils.py


Example 16: model

    def model(data, train=False):
        """The Model definition."""
        # 2D convolution, with 'SAME' padding (i.e. the output feature map has
        # the same size as the input). Note that {strides} is a 4D array whose
        # shape matches the data layout: [image index, y, x, depth].
        conv = tf.nn.conv2d(data,
                            conv1_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        # Bias and rectified linear non-linearity.
        relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
        # Max pooling. The kernel size spec {ksize} also follows the layout of
        # the data. Here we have a pooling window of 2, and a stride of 2.
        pool = tf.nn.max_pool(relu,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')

        conv2 = tf.nn.conv2d(pool,
                            conv2_weights,
                            strides=[1, 1, 1, 1],
                            padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        pool2 = tf.nn.max_pool(relu2,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME')



        # Reshape the feature map cuboid into a 2D matrix to feed it to the
        # fully connected layers.
        pool_shape = pool2.get_shape().as_list()
        reshape = tf.reshape(
            pool2,
            [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
        # Fully connected layer. Note that the '+' operation automatically
        # broadcasts the biases.
        hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
        # Add a 50% dropout during training only. Dropout also scales
        # activations such that no rescaling is needed at evaluation time.
        #if train:
        #    hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
        out = tf.matmul(hidden, fc2_weights) + fc2_biases

        if train:
            summary_id = '_0'
            s_data = get_image_summary(data)
            filter_summary0 = tf.image_summary('summary_data' + summary_id, s_data)
            s_conv = get_image_summary(conv)
            filter_summary2 = tf.image_summary('summary_conv' + summary_id, s_conv)
            s_pool = get_image_summary(pool)
            filter_summary3 = tf.image_summary('summary_pool' + summary_id, s_pool)
            s_conv2 = get_image_summary(conv2)
            filter_summary4 = tf.image_summary('summary_conv2' + summary_id, s_conv2)
            s_pool2 = get_image_summary(pool2)
            filter_summary5 = tf.image_summary('summary_pool2' + summary_id, s_pool2)

        return out
Author: albertbuchard | Project: semantic-segmentation | Lines: 59 | Source: Training_run.py


Example 17: distorted_inputs

def distorted_inputs(data_dir, batch_size):
  """Construct distorted input for CIFAR training using the Reader ops.
  Args:
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for training the network. Note the many random
  # distortions applied to the image.

  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.random_crop(reshaped_image, [height, width, 3])

  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)

  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
  distorted_image = tf.image.random_brightness(distorted_image,
                                               max_delta=63)
  distorted_image = tf.image.random_contrast(distorted_image,
                                             lower=0.2, upper=1.8)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(distorted_image)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print ('Filling queue with %d CIFAR images before starting to train. '
         'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.

  images, labels = _generate_image_and_label_batch(float_image, read_input.label,
                                                   min_queue_examples, batch_size)

  tf.image_summary('distorted_images', images)

  return images, labels
Author: BenJamesbabala | Project: Fundamentals-of-Deep-Learning-Book | Lines: 59 | Source: cifar10_input.py


Example 18: collectImage

def collectImage(name, tensor, num=5):
    """
    :param name: name prefix used for display
    :param tensor: input image batch
    :param num: number of images to display
    :return:
    """
    temp = tf.reduce_mean(tensor, axis=3, keep_dims=True)
    tf.image_summary(tag=name, tensor=temp, max_images=num)
Author: nanqiangyipo | Project: PyCodeFragment | Lines: 9 | Source: 0504_cnn_image_classify.py


Example 19: _get_cost

    def _get_cost(self, input_vars, is_training):
        image, label = input_vars
        keep_prob = tf.constant(0.5 if is_training else 1.0)

        if is_training:
            image, label = tf.train.shuffle_batch(
                [image, label], BATCH_SIZE, CAPACITY, MIN_AFTER_DEQUEUE,
                num_threads=6, enqueue_many=True)
            tf.image_summary("train_image", image, 10)

        image = image / 4.0     # just to make range smaller
        l = Conv2D('conv1.1', image, out_channel=64, kernel_shape=3)
        l = Conv2D('conv1.2', l, out_channel=64, kernel_shape=3, nl=tf.identity)
        l = BatchNorm('bn1', l, is_training)
        l = tf.nn.relu(l)
        l = MaxPooling('pool1', l, 3, stride=2, padding='SAME')

        l = Conv2D('conv2.1', l, out_channel=128, kernel_shape=3)
        l = Conv2D('conv2.2', l, out_channel=128, kernel_shape=3, nl=tf.identity)
        l = BatchNorm('bn2', l, is_training)
        l = tf.nn.relu(l)
        l = MaxPooling('pool2', l, 3, stride=2, padding='SAME')

        l = Conv2D('conv3.1', l, out_channel=128, kernel_shape=3, padding='VALID')
        l = Conv2D('conv3.2', l, out_channel=128, kernel_shape=3, padding='VALID', nl=tf.identity)
        l = BatchNorm('bn3', l, is_training)
        l = tf.nn.relu(l)
        l = FullyConnected('fc0', l, 1024 + 512,
                           b_init=tf.constant_initializer(0.1))
        l = tf.nn.dropout(l, keep_prob)
        l = FullyConnected('fc1', l, out_dim=512,
                           b_init=tf.constant_initializer(0.1))
        # fc will have activation summary by default. disable for the output layer
        logits = FullyConnected('linear', l, out_dim=10, summary_activation=False,
                                nl=tf.identity)
        prob = tf.nn.softmax(logits, name='output')

        y = one_hot(label, 10)
        cost = tf.nn.softmax_cross_entropy_with_logits(logits, y)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, cost)

        # compute the number of failed samples, for ValidationError to use at test time
        wrong = prediction_incorrect(logits, label)
        nr_wrong = tf.reduce_sum(wrong, name='wrong')
        # monitor training error
        tf.add_to_collection(
            MOVING_SUMMARY_VARS_KEY, tf.reduce_mean(wrong, name='train_error'))

        # weight decay on all W of fc layers
        wd_cost = tf.mul(0.004,
                         regularize_cost('fc.*/W', tf.nn.l2_loss),
                         name='regularize_loss')
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)

        add_param_summary([('.*/W', ['histogram', 'sparsity'])])   # monitor W
        return tf.add_n([cost, wd_cost], name='cost')
Author: saifrahmed | Project: tensorpack | Lines: 57 | Source: cifar10_convnet.py


Example 20: main

def main(argv=None):
    print "Reading MNIST data..."
    data = mnist.input_data.read_data_sets("MNIST_data", one_hot=True)
    images = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE])
    tf.image_summary("Input", tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=1)
    print "Setting up inference..."
    encoded, output_image = inference_fc(images)
    tf.image_summary("Output", tf.reshape(output_image, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=1)

    print "Loss setup..."
    loss1 = tf.nn.l2_loss(tf.sub(output_image, images)) / (IMAGE_SIZE * IMAGE_SIZE)
    loss2 = tf.add_n(tf.get_collection("losses"))
    loss = loss1 + FLAGS.regularization * loss2
    tf.scalar_summary("Loss", loss)
    tf.scalar_summary("Encoder_loss", loss1)
    tf.scalar_summary("Reg_loss", loss2)

    print "Setting up optimizer..."
    train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    print 'Setting up graph summary...'
    summary_op = tf.merge_all_summaries()

    # print "Creating matplot fig"
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')

    with tf.Session() as sess:
        summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph_def)
        print "Creating saver..."
        saver = tf.train.Saver()
        sess.run(tf.initialize_all_variables())
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored..."

        for step in xrange(MAX_ITERATIONS):
            batch_image, batch_label = data.train.next_batch(FLAGS.batch_size)
            feed_dict = {images: batch_image}
            if step % 100 == 0:
                summary_str, loss_val = sess.run([summary_op, loss], feed_dict=feed_dict)
                print "Step %d Train loss %f" % (step, loss_val)
                summary_writer.add_summary(summary_str, global_step=step)

            if step % 1000 == 0:
                saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
                test_compression = sess.run(encoded, feed_dict={images: data.test.images})
                labels = np.argmax(data.test.labels, axis=1).reshape((-1, 1))
                write_file = os.path.join(FLAGS.logs_dir, "checkpoint%d.txt" % step)
                write_arr = np.hstack((test_compression, labels))
                np.savetxt(write_file, write_arr)
                # ax.clear()
                # ax.scatter(test_compression[:, 0], test_compression[:, 1], test_compression[:, 2], s=10,
                #            c=COLORS[labels], marker='o')
                # plt.show()
            sess.run(train_op, feed_dict=feed_dict)
Author: RosieCampbell | Project: TensorflowProjects | Lines: 57 | Source: MNISTAutoEncoder.py



Note: The tensorflow.image_summary examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.

