
Python slim.dropout Function Code Examples


This article collects typical usage examples of the tensorflow.contrib.slim.dropout function in Python. If you are wondering how exactly the Python dropout function is used, what it does, or what real-world examples of it look like, the curated code examples below may help.



The following presents 20 code examples of the dropout function, sorted by popularity by default.
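Before diving into the examples, here is a minimal sketch of the typical call pattern, assuming TensorFlow 1.x with tf.contrib.slim available (the layer sizes, placeholder shape, and scope names below are illustrative only, not taken from any of the projects):

import tensorflow as tf
import tensorflow.contrib.slim as slim

# A boolean placeholder toggles train/eval behavior: dropout is applied
# only while is_training is True; otherwise the layer is a pass-through.
is_training = tf.placeholder(tf.bool, shape=[], name='is_training')

inputs = tf.placeholder(tf.float32, shape=[None, 256], name='inputs')
net = slim.fully_connected(inputs, 128, scope='fc1')

# keep_prob is the probability of KEEPING a unit (here 50%), not of
# dropping it; is_training may be a Python bool or a tf.bool tensor.
net = slim.dropout(net, keep_prob=0.5, is_training=is_training,
                   scope='dropout1')

Several examples below instead gate the call with a Python-level `if is_training:` check or hard-code `is_training=True` in the training graph; these are equivalent ways of disabling dropout at inference time.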

Example 1: conv_net_kelz

def conv_net_kelz(inputs):
  """Builds the ConvNet from Kelz 2016."""
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      activation_fn=tf.nn.relu,
      weights_initializer=tf.contrib.layers.variance_scaling_initializer(
          factor=2.0, mode='FAN_AVG', uniform=True)):
    net = slim.conv2d(
        inputs, 32, [3, 3], scope='conv1', normalizer_fn=slim.batch_norm)

    net = slim.conv2d(
        net, 32, [3, 3], scope='conv2', normalizer_fn=slim.batch_norm)
    net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool2')
    net = slim.dropout(net, 0.25, scope='dropout2')

    net = slim.conv2d(
        net, 64, [3, 3], scope='conv3', normalizer_fn=slim.batch_norm)
    net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool3')
    net = slim.dropout(net, 0.25, scope='dropout3')

    # Flatten while preserving batch and time dimensions.
    dims = tf.shape(net)
    net = tf.reshape(net, (dims[0], dims[1],
                           net.shape[2].value * net.shape[3].value), 'flatten4')

    net = slim.fully_connected(net, 512, scope='fc5')
    net = slim.dropout(net, 0.5, scope='dropout5')

    return net
Developer: Alice-ren | Project: magenta | Lines: 29 | Source file: model.py


Example 2: build_graph

def build_graph(top_k):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag')
    with tf.device('/gpu:0'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training}):
            conv3_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv3_1')
            max_pool_1 = slim.max_pool2d(conv3_1, [2, 2], [2, 2], padding='SAME', scope='pool1')
            conv3_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv3_2')
            max_pool_2 = slim.max_pool2d(conv3_2, [2, 2], [2, 2], padding='SAME', scope='pool2')
            conv3_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3_3')
            max_pool_3 = slim.max_pool2d(conv3_3, [2, 2], [2, 2], padding='SAME', scope='pool3')
            conv3_4 = slim.conv2d(max_pool_3, 512, [3, 3], padding='SAME', scope='conv3_4')
            conv3_5 = slim.conv2d(conv3_4, 512, [3, 3], padding='SAME', scope='conv3_5')
            max_pool_4 = slim.max_pool2d(conv3_5, [2, 2], [2, 2], padding='SAME', scope='pool4')

            flatten = slim.flatten(max_pool_4)
            fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024,
                                       activation_fn=tf.nn.relu, scope='fc1')
            logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None,
                                          scope='fc2')
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            # control_flow_ops is presumably imported in the source file as
            # "from tensorflow.python.ops import control_flow_ops"; the
            # dependency forces the batch-norm update ops to run before loss.
            loss = control_flow_ops.with_dependencies([updates], loss)

        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
        probabilities = tf.nn.softmax(logits)

        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'is_training': is_training,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
Developer: oraSC | Project: Chinese-Character-Recognition | Lines: 57 | Source file: chinese_character_recognition_bn.py


Example 3: construct_embedding

  def construct_embedding(self):
    """Builds a conv -> spatial softmax -> FC adaptation network."""
    is_training = self._is_training
    normalizer_params = {'is_training': is_training}
    with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
      self._adaptation_scope = vs.name
      with slim.arg_scope(
          [slim.layers.conv2d],
          activation_fn=tf.nn.relu,
          normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
          weights_regularizer=slim.regularizers.l2_regularizer(
              self._l2_reg_weight),
          biases_regularizer=slim.regularizers.l2_regularizer(
              self._l2_reg_weight)):
        with slim.arg_scope(
            [slim.layers.fully_connected],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
            weights_regularizer=slim.regularizers.l2_regularizer(
                self._l2_reg_weight),
            biases_regularizer=slim.regularizers.l2_regularizer(
                self._l2_reg_weight)):

          # Input to embedder is pre-trained inception output.
          net = self._pretrained_output

          # Optionally add more conv layers.
          for num_filters in self._additional_conv_sizes:
            net = slim.layers.conv2d(
                net, num_filters, kernel_size=[3, 3], stride=[1, 1])
            net = slim.dropout(net, keep_prob=self._conv_hidden_keep_prob,
                               is_training=is_training)

          # Take the spatial soft arg-max of the last convolutional layer.
          # This is a form of spatial attention over the activations.
          # See more here: http://arxiv.org/abs/1509.06113.
          net = tf.contrib.layers.spatial_softmax(net)
          self.spatial_features = net

          # Add fully connected layers.
          net = slim.layers.flatten(net)
          for fc_hidden_size in self._fc_hidden_sizes:
            net = slim.layers.fully_connected(net, fc_hidden_size)
            if self._fc_hidden_keep_prob < 1.0:
              net = slim.dropout(net, keep_prob=self._fc_hidden_keep_prob,
                                 is_training=is_training)

          # Connect last FC layer to embedding.
          net = slim.layers.fully_connected(net, self._embedding_size,
                                            activation_fn=None)

          # Optionally L2 normalize the embedding.
          if self._embedding_l2:
            net = tf.nn.l2_normalize(net, dim=1)

          return net
Developer: ALISCIFP | Project: models | Lines: 56 | Source file: model.py


Example 4: inference

def inference(inputs):
    x = tf.reshape(inputs,[-1,28,28,1])
    conv_1 = tf.nn.relu(slim.conv2d(x,32,[3,3])) #28 * 28 * 32
    pool_1 = slim.max_pool2d(conv_1,[2,2]) # 14 * 14 * 32
    block_1 = res_identity(pool_1,32,[3,3],'layer_2')
    block_2 = res_change(block_1,64,[3,3],'layer_3')
    block_3 = res_identity(block_2,64,[3,3],'layer_4')
    block_4 = res_change(block_3,32,[3,3],'layer_5')
    net_flatten = slim.flatten(block_4,scope='flatten')
    fc_1 = slim.fully_connected(slim.dropout(net_flatten,0.8),200,activation_fn=tf.nn.tanh,scope='fc_1')
    output = slim.fully_connected(slim.dropout(fc_1,0.8),10,activation_fn=None,scope='output_layer')
    return output
Developer: zeroToAll | Project: tensorflow_practice | Lines: 12 | Source file: resnet_mnist.py


Example 5: _build_network

  def _build_network(self, sess, is_training=True):
    with tf.variable_scope('vgg_16', 'vgg_16'):
      # select initializers
      if cfg.TRAIN.TRUNCATED:
        initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
      else:
        initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

      net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],
                        trainable=False, scope='conv1')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                        trainable=False, scope='conv2')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                        trainable=is_training, scope='conv3')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv4')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv5')
      self._act_summaries.append(net)
      self._layers['head'] = net
      # build the anchors for the image
      self._anchor_component()
      # region proposal network
      rois = self._region_proposal(net, is_training, initializer)
      # region of interest pooling
      if cfg.POOLING_MODE == 'crop':
        pool5 = self._crop_pool_layer(net, rois, "pool5")
      else:
        raise NotImplementedError

      pool5_flat = slim.flatten(pool5, scope='flatten')
      fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
      if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, scope='dropout6')
      fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
      if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, scope='dropout7')
      # region classification
      cls_prob, bbox_pred = self._region_classification(fc7, 
                                                        is_training, 
                                                        initializer, 
                                                        initializer_bbox)

      self._score_summaries.update(self._predictions)

      return rois, cls_prob, bbox_pred
Developer: deeplxx | Project: tf-faster-rcnn | Lines: 52 | Source file: vgg16.py


Example 6: _head_to_tail

  def _head_to_tail(self, pool5, is_training, reuse=False):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
      pool5_flat = slim.flatten(pool5, scope='flatten')
      fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
      if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, 
                            scope='dropout6')
      fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
      if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, 
                            scope='dropout7')

    return fc7
Developer: BoAdBo | Project: AlphaPose | Lines: 13 | Source file: vgg16.py


Example 7: clone_fn

    def clone_fn(batch_queue):
      """Allows data parallelism by creating multiple clones of network_fn."""
      images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input = batch_queue.dequeue()
      anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
      end_points = network_fn(images)
      end_points["viz_images"] = images
      conv_ds_14 = end_points['MobileNet/conv_ds_14/depthwise_conv']
      dropout = slim.dropout(conv_ds_14, keep_prob=0.5, is_training=True)
      num_output = config.NUM_ANCHORS * (config.NUM_CLASSES + 1 + 4)
      predict = slim.conv2d(dropout, num_output, kernel_size=(3, 3), stride=1, padding='SAME',
                            activation_fn=None,
                            weights_initializer=tf.truncated_normal_initializer(stddev=0.0001),
                            scope="MobileNet/conv_predict")

      with tf.name_scope("Interpre_prediction") as scope:
        pred_box_delta, pred_class_probs, pred_conf, ious, det_probs, det_boxes, det_class = \
          interpre_prediction(predict, b_input_mask, anchors, b_box_input)
        end_points["viz_det_probs"] = det_probs
        end_points["viz_det_boxes"] = det_boxes
        end_points["viz_det_class"] = det_class

      with tf.name_scope("Losses") as scope:
        losses(b_input_mask, b_labels_input, ious, b_box_delta_input, pred_class_probs, pred_conf, pred_box_delta)

      return end_points
Developer: tigercut | Project: MobileNet | Lines: 25 | Source file: train_object_detector.py


Example 8: metric_net

def metric_net(img, scope, df_dim=64, reuse=False, train=True):

    bn = functools.partial(slim.batch_norm, scale=True, is_training=train,
                           decay=0.9, epsilon=1e-5, updates_collections=None)

    with tf.variable_scope(scope + '_discriminator', reuse=reuse):
        h0 = lrelu(conv(img, df_dim, 4, 2, scope='h0_conv'))    # h0 is (128 x 128 x df_dim)
        pool1 = Mpool(h0, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
		
        h1 = lrelu(conv(pool1, df_dim * 2, 4, 2, scope='h1_conv'))  # h1 is (32 x 32 x df_dim*2)
        pool2 = Mpool(h1, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
		
        h2 = lrelu(conv(pool2, df_dim * 4, 4, 2, scope='h2_conv'))  # h2 is (8 x 8 x df_dim*4)
        pool3 = Mpool(h2, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
		
        h3 = lrelu(conv(pool3, df_dim * 8, 4, 2, scope='h3_conv'))  # h3 is (2 x 2 x df_dim*4)
        pool4 = Mpool(h3, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
		
        shape = pool4.get_shape()
        flatten_shape = shape[1].value * shape[2].value * shape[3].value
        h3_reshape = tf.reshape(pool4, [-1, flatten_shape], name = 'h3_reshape')
		
        fc1 = lrelu(FC(h3_reshape, df_dim*2, scope='fc1'))
        dropout_fc1 = slim.dropout(fc1, 0.5, scope='dropout_fc1')  
        net = FC(dropout_fc1, df_dim, scope='fc2') 
        
        #print_activations(net)
        #print_activations(pool4)
        return net
Developer: jcolares | Project: Learning-via-Translation | Lines: 29 | Source file: models_spgan.py


Example 9: build_arch_baseline

def build_arch_baseline(input, is_train: bool, num_classes: int):

    bias_initializer = tf.truncated_normal_initializer(
        mean=0.0, stddev=0.01)  # tf.constant_initializer(0.0)
    # The paper did not mention any regularization; a common L2 regularizer on the weights is added here
    weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)

    tf.logging.info('input shape: {}'.format(input.get_shape()))

    # weights_initializer=initializer,
    with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=is_train, biases_initializer=bias_initializer, weights_regularizer=weights_regularizer):
        with tf.variable_scope('relu_conv1') as scope:
            output = slim.conv2d(input, num_outputs=32, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer1')

            tf.logging.info('output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('relu_conv2') as scope:
            output = slim.conv2d(output, num_outputs=64, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer2')

            tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.flatten(output)
        output = slim.fully_connected(output, 1024, scope='relu_fc3', activation_fn=tf.nn.relu)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        output = slim.dropout(output, 0.5, scope='dp')
        output = slim.fully_connected(output, num_classes, scope='final_layer', activation_fn=None)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        return output
Developer: lzqkean | Project: deep_learning | Lines: 32 | Source file: capsnet_em.py


Example 10: create_inner_block

def create_inner_block(
        incoming, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(1e-3),
        bias_initializer=tf.zeros_initializer(), regularizer=None,
        increase_dim=False, summarize_activations=True):
    n = incoming.get_shape().as_list()[-1]
    stride = 1
    if increase_dim:
        n *= 2
        stride = 2

    incoming = slim.conv2d(
        incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
        normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/1")
    if summarize_activations:
        tf.summary.histogram(incoming.name + "/activations", incoming)

    # is_training defaults to True in slim.dropout, so dropout stays active
    # here unless it is overridden externally (e.g. via slim.arg_scope).
    incoming = slim.dropout(incoming, keep_prob=0.6)

    incoming = slim.conv2d(
        incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
        normalizer_fn=None, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/2")
    return incoming
Developer: BenJamesbabala | Project: deep_sort | Lines: 27 | Source file: generate_detections.py


Example 11: resface36

def resface36(images, keep_probability, 
             phase_train=True, bottleneck_layer_size=512, 
             weight_decay=0.0, reuse=None):
    '''
    conv name
    conv[conv_layer]_[block_index]_[block_layer_index]
    '''
    with tf.variable_scope('Conv1'):
        net = resface_pre(images,64,scope='Conv1_pre')
        net = slim.repeat(net,2,resface_block,64,scope='Conv_1')
    with tf.variable_scope('Conv2'):
        net = resface_pre(net,128,scope='Conv2_pre')
        net = slim.repeat(net,4,resface_block,128,scope='Conv_2')
    with tf.variable_scope('Conv3'):
        net = resface_pre(net,256,scope='Conv3_pre')
        net = slim.repeat(net,8,resface_block,256,scope='Conv_3')
    with tf.variable_scope('Conv4'):
        net = resface_pre(net,512,scope='Conv4_pre')
        #net = resface_block(Conv4_pre,512,scope='Conv4_1')
        net = slim.repeat(net,1,resface_block,512,scope='Conv4')

    with tf.variable_scope('Logits'):
        #pylint: disable=no-member
        #net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
        #                      scope='AvgPool')
        net = slim.flatten(net)
        net = slim.dropout(net, keep_probability, is_training=phase_train,
                           scope='Dropout')
    net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, 
            scope='Bottleneck', reuse=False)    
    return net,''
Developer: Joker316701882 | Project: Additive-Margin-Softmax | Lines: 31 | Source file: resface.py


Example 12: LResnet50E_IR

def LResnet50E_IR(images, keep_probability, 
             phase_train=True, bottleneck_layer_size=512, 
             weight_decay=0.0, reuse=None):
    '''
    conv name
    conv[conv_layer]_[block_index]_[block_layer_index]
    
    for resnet50 n_units=[3,4,14,3], consider one unit is dim_reduction_layer
    repeat n_units=[2,3,13,2]
    '''
    with tf.variable_scope('Conv1'):
        net = slim.conv2d(images,64,scope='Conv1_pre')
        net = slim.batch_norm(net,scope='Conv1_bn')
    with tf.variable_scope('Conv2'):
        net = resface_block(net,64,stride=2,dim_match=False,scope='Conv2_pre')
        net = slim.repeat(net,2,resface_block,64,1,True,scope='Conv2_main')
    with tf.variable_scope('Conv3'):
        net = resface_block(net,128,stride=2,dim_match=False,scope='Conv3_pre')
        net = slim.repeat(net,3,resface_block,128,1,True,scope='Conv3_main')
    with tf.variable_scope('Conv4'):
        net = resface_block(net,256,stride=2,dim_match=False,scope='Conv4_pre')
        net = slim.repeat(net,13,resface_block,256,1,True,scope='Conv4_main')
    with tf.variable_scope('Conv5'):
        net = resface_block(net,512,stride=2,dim_match=False,scope='Conv5_pre')
        net = slim.repeat(net,2,resface_block,512,1,True,scope='Conv5_main')

    with tf.variable_scope('Logits'):
        net = slim.batch_norm(net,activation_fn=None,scope='bn1')
        net = slim.dropout(net, keep_probability, is_training=phase_train,scope='Dropout')        
        net = slim.flatten(net)
    
    net = slim.fully_connected(net, bottleneck_layer_size, biases_initializer=tf.contrib.layers.xavier_initializer(), scope='fc1')
    net = slim.batch_norm(net, activation_fn=None, scope='Bottleneck')

    return net,''
Developer: Joker316701882 | Project: Additive-Margin-Softmax | Lines: 35 | Source file: insightface.py


Example 13: build_single_inceptionv1

def build_single_inceptionv1(train_tfdata, is_train, dropout_keep_prob):
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        identity, end_points = inception.inception_v1(train_tfdata, dropout_keep_prob = dropout_keep_prob, is_training=is_train)
        net = slim.avg_pool2d(end_points['Mixed_5c'], [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        feature = tf.squeeze(net, [1, 2])
    return identity, feature
Developer: seindlut | Project: deep_p2s | Lines: 7 | Source file: build_subnet.py


Example 14: __init__

	def __init__(self,is_training):
		
		self.input_image = tf.placeholder(dtype=tf.float32,shape=[None,64,64,3],name='input_image')
		
		self.input_label = tf.placeholder(dtype=tf.float32,shape=[None,100],name='input_label')

		self.input_nlcd = tf.placeholder(dtype=tf.float32,shape=[None,15],name='input_nlcd')

		#logits, end_points = resnet_v2.resnet_v2_50(self.input_image, num_classes=100, is_training=True)

		# flatten_hist = tf.reshape(self.input_image,[-1,96])

		self.keep_prob = tf.placeholder(tf.float32)

		weights_regularizer=slim.l2_regularizer(FLAGS.weight_decay)


		flatten_hist = tf.reshape(self.input_image,[-1,3*64*64])
		flatten_hist = tf.concat([flatten_hist,self.input_nlcd],1)
		x = slim.fully_connected(flatten_hist, 512,weights_regularizer=weights_regularizer,scope='decoder/fc_1')
		x = slim.fully_connected(x, 1024,weights_regularizer=weights_regularizer, scope='decoder/fc_2')
		flatten_hist = slim.fully_connected(x, 512,weights_regularizer=weights_regularizer, scope='decoder/fc_3')

		all_logits = []
		all_output = []

		for i in range(100):
			if i == 0 :
				current_input_x = flatten_hist
			else:
				current_output = tf.concat(all_output,1)
				current_input_x = tf.concat([flatten_hist,current_output],1)

			x = slim.fully_connected(current_input_x, 256,weights_regularizer=weights_regularizer)
			x = slim.fully_connected(x, 100,weights_regularizer=weights_regularizer)
			#x = slim.fully_connected(x, 17,weights_regularizer=weights_regularizer)

			x = slim.dropout(x,keep_prob=self.keep_prob,is_training=is_training)
			all_logits.append(slim.fully_connected(x, 1, activation_fn=None, weights_regularizer=weights_regularizer))
			all_output.append(tf.sigmoid(all_logits[i]))

		final_logits = tf.concat(all_logits,1)
		final_output = tf.sigmoid(final_logits)

		self.output = final_output
		self.ce_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_label,logits=final_logits),1))

		slim.losses.add_loss(self.ce_loss)
		tf.summary.scalar('ce_loss',self.ce_loss)
		
		# l2 loss
		self.l2_loss = tf.add_n(slim.losses.get_regularization_losses())
		tf.summary.scalar('l2_loss',self.l2_loss)

		#total loss
		self.total_loss = slim.losses.get_total_loss()
		tf.summary.scalar('total_loss',self.total_loss)

		#self.output = tf.sigmoid(x)
Developer: ParsonsZeng | Project: MEDL_CVAE | Lines: 59 | Source file: resnet.py


Example 15: build_graph

def build_graph(top_k):
    # with tf.device('/cpu:0'):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')

    conv_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv1')
    max_pool_1 = slim.max_pool2d(conv_1, [2, 2], [2, 2], padding='SAME')
    conv_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv2')
    max_pool_2 = slim.max_pool2d(conv_2, [2, 2], [2, 2], padding='SAME')
    conv_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3')
    max_pool_3 = slim.max_pool2d(conv_3, [2, 2], [2, 2], padding='SAME')

    flatten = slim.flatten(max_pool_3)
    fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024, activation_fn=tf.nn.tanh, scope='fc1')
    logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None, scope='fc2')
    # logits = slim.fully_connected(flatten, FLAGS.charset_size, activation_fn=None, reuse=reuse, scope='fc')
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

    global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
    rate = tf.train.exponential_decay(2e-4, global_step, decay_steps=2000, decay_rate=0.97, staircase=True)
    train_op = tf.train.AdamOptimizer(learning_rate=rate).minimize(loss, global_step=global_step)
    probabilities = tf.nn.softmax(logits)

    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)
    merged_summary_op = tf.summary.merge_all()
    predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
    accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
Developer: SiyuanWei | Project: tensorflow-101 | Lines: 44 | Source file: chinese_rec.py


Example 16: inference

 def inference(self):
     x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
     with slim.arg_scope([slim.conv2d, slim.fully_connected],
                         weights_initializer=tf.contrib.layers.xavier_initializer(),
                         weights_regularizer=slim.l2_regularizer(1e-6)):
         model = slim.conv2d(x, 96, [11, 11], 4, padding='VALID', scope='conv1')
         model = slim.max_pool2d(model, [3, 3], 2, scope='pool1')
         model = slim.conv2d(model, 256, [5, 5], 1, scope='conv2')
         model = slim.max_pool2d(model, [3, 3], 2, scope='pool2')
         model = slim.conv2d(model, 384, [3, 3], 1, scope='conv3')
         model = slim.conv2d(model, 384, [3, 3], 1, scope='conv4')
         model = slim.conv2d(model, 256, [3, 3], 1, scope='conv5')
         model = slim.max_pool2d(model, [3, 3], 2, scope='pool5')
         model = slim.flatten(model)
         model = slim.fully_connected(model, 4096, activation_fn=None, scope='fc1')
         model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1')
         model = slim.fully_connected(model, 4096, activation_fn=None, scope='fc2')
         model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do2')
         model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc3')
     return model
Developer: Dasona | Project: DIGITS | Lines: 20 | Source file: alexnet.py


Example 17: inference

 def inference(self):
     x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
     with slim.arg_scope([slim.conv2d, slim.fully_connected],
                         weights_initializer=tf.contrib.layers.xavier_initializer(),
                         weights_regularizer=slim.l2_regularizer(0.0005)):
         model = slim.repeat(x, 2, slim.conv2d, 64, [3, 3], scope='conv1')
         model = slim.max_pool2d(model, [2, 2], scope='pool1')
         model = slim.repeat(model, 2, slim.conv2d, 128, [3, 3], scope='conv2')
         model = slim.max_pool2d(model, [2, 2], scope='pool2')
         model = slim.repeat(model, 3, slim.conv2d, 256, [3, 3], scope='conv3')
         model = slim.max_pool2d(model, [2, 2], scope='pool3')
         model = slim.repeat(model, 3, slim.conv2d, 512, [3, 3], scope='conv4')
         model = slim.max_pool2d(model, [2, 2], scope='pool4')
         model = slim.repeat(model, 3, slim.conv2d, 512, [3, 3], scope='conv5')
         model = slim.max_pool2d(model, [2, 2], scope='pool5')
         model = slim.flatten(model, scope='flatten5')
         model = slim.fully_connected(model, 4096, scope='fc6')
         model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do6')
         model = slim.fully_connected(model, 4096, scope='fc7')
         model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do7')
         model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fcX8')
     return model
Developer: Dasona | Project: DIGITS | Lines: 22 | Source file: vgg16.py


Example 18: conv_net

def conv_net(inputs, hparams):
  """Builds the ConvNet from Kelz 2016."""
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      activation_fn=tf.nn.relu,
      weights_initializer=tf.contrib.layers.variance_scaling_initializer(
          factor=2.0, mode='FAN_AVG', uniform=True)):

    net = inputs
    i = 0
    for (conv_temporal_size, conv_freq_size,
         num_filters, freq_pool_size, dropout_amt) in zip(
             hparams.temporal_sizes, hparams.freq_sizes, hparams.num_filters,
             hparams.pool_sizes, hparams.dropout_keep_amts):
      net = slim.conv2d(
          net,
          num_filters, [conv_temporal_size, conv_freq_size],
          scope='conv' + str(i),
          normalizer_fn=slim.batch_norm)
      if freq_pool_size > 1:
        net = slim.max_pool2d(
            net, [1, freq_pool_size],
            stride=[1, freq_pool_size],
            scope='pool' + str(i))
      if dropout_amt < 1:
        net = slim.dropout(net, dropout_amt, scope='dropout' + str(i))
      i += 1

    # Flatten while preserving batch and time dimensions.
    dims = tf.shape(net)
    net = tf.reshape(
        net, (dims[0], dims[1], net.shape[2].value * net.shape[3].value),
        'flatten_end')

    net = slim.fully_connected(net, hparams.fc_size, scope='fc_end')
    net = slim.dropout(net, hparams.fc_dropout_keep_amt, scope='dropout_end')

    return net
Developer: cghawthorne | Project: magenta | Lines: 38 | Source file: model.py


Example 19: Encoder_fc3_dropout

def Encoder_fc3_dropout(x,
                        num_output=85,
                        is_training=True,
                        reuse=False,
                        name="3D_module"):
    """
    3D inference module. 3 MLP layers (last is the output)
    With dropout  on first 2.
    Input:
    - x: N x [|img_feat|, |3D_param|]
    - reuse: bool

    Outputs:
    - 3D params: N x num_output
      if orthogonal: 
           either 85: (3 + 24*3 + 10) or 109 (3 + 24*4 + 10) for factored axis-angle representation
      if perspective:
          86: (f, tx, ty, tz) + 24*3 + 10, or 110 for factored axis-angle.
    - variables: tf variables
    """
    if reuse:
        print('Reuse is on!')
    with tf.variable_scope(name, reuse=reuse) as scope:
        net = slim.fully_connected(x, 1024, scope='fc1')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout1')
        net = slim.fully_connected(net, 1024, scope='fc2')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout2')
        small_xavier = variance_scaling_initializer(
            factor=.01, mode='FAN_AVG', uniform=True)
        net = slim.fully_connected(
            net,
            num_output,
            activation_fn=None,
            weights_initializer=small_xavier,
            scope='fc3')

    variables = tf.contrib.framework.get_variables(scope)
    return net, variables
Developer: andrewjong | Project: hmr | Lines: 38 | Source file: models.py


Example 20: construct_net

 def construct_net(self,is_trained = True):
     with slim.arg_scope([slim.conv2d], padding='VALID',
                         weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         weights_regularizer=slim.l2_regularizer(0.0005)):
         net = slim.conv2d(self.input_images,6,[5,5],1,padding='SAME',scope='conv1')
         net = slim.max_pool2d(net, [2, 2], scope='pool2')
         net = slim.conv2d(net,16,[5,5],1,scope='conv3')
         net = slim.max_pool2d(net, [2, 2], scope='pool4')
         net = slim.conv2d(net,120,[5,5],1,scope='conv5')
         net = slim.flatten(net, scope='flat6')
         net = slim.fully_connected(net, 84, scope='fc7')
         net = slim.dropout(net, self.dropout,is_training=is_trained, scope='dropout8')
         digits = slim.fully_connected(net, 10, scope='fc9')
     return digits
Developer: dcrmg | Project: LeNet | Lines: 14 | Source file: lenet.py



Note: The tensorflow.contrib.slim.dropout examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Refer to each project's license before distributing or reusing the code, and do not reproduce this article without permission.

