Python slim.max_pool2d Function Code Examples


This article collects typical usage examples of the tensorflow.contrib.slim.max_pool2d function in Python. If you are wondering what max_pool2d does, how to call it, or what real-world uses look like, the curated code samples below should help.



Twenty code examples of the max_pool2d function are shown below, sorted by popularity by default.
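Before diving into the examples, a minimal call sketch may help. It assumes TensorFlow 1.x, where tf.contrib.slim is still available; the placeholder shape is purely illustrative.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Minimal sketch (TensorFlow 1.x assumed): a batch of feature maps in NHWC layout.
inputs = tf.placeholder(tf.float32, [None, 224, 224, 64], name='inputs')

# 2x2 max pooling with stride 2. slim.max_pool2d defaults to padding='VALID',
# so the spatial resolution is halved: the output shape is [None, 112, 112, 64].
net = slim.max_pool2d(inputs, [2, 2], stride=2, scope='pool1')

Both kernel_size and stride accept either a single integer or a [height, width] pair, which is why the examples below mix forms such as [2, 2], [3, 3] with stride 2, and [1, 2] with stride [1, 2].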

Example 1: network_det

	def network_det(self,inputs,reuse=False):

		if reuse:
			tf.get_variable_scope().reuse_variables()

		with slim.arg_scope([slim.conv2d, slim.fully_connected],
							 activation_fn = tf.nn.relu,
							 weights_initializer = tf.truncated_normal_initializer(0.0, 0.01)):
			
			conv1 = slim.conv2d(inputs, 96, [11,11], 4, padding= 'VALID', scope='conv1')
			max1 = slim.max_pool2d(conv1, [3,3], 2, padding= 'VALID', scope='max1')

			conv2 = slim.conv2d(max1, 256, [5,5], 1, scope='conv2')
			max2 = slim.max_pool2d(conv2, [3,3], 2, padding= 'VALID', scope='max2')
			conv3 = slim.conv2d(max2, 384, [3,3], 1, scope='conv3')

			conv4 = slim.conv2d(conv3, 384, [3,3], 1, scope='conv4')
			conv5 = slim.conv2d(conv4, 256, [3,3], 1, scope='conv5')
			pool5 = slim.max_pool2d(conv5, [3,3], 2, padding= 'VALID', scope='pool5')
			
			shape = int(np.prod(pool5.get_shape()[1:]))
			fc6 = slim.fully_connected(tf.reshape(pool5, [-1, shape]), 4096, scope='fc6')
			
			fc_detection = slim.fully_connected(fc6, 512, scope='fc_det1')
			out_detection = slim.fully_connected(fc_detection, 2, scope='fc_det2', activation_fn = None)
			
		return out_detection
Developer: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 27, Source: model_prediction.py
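As a quick sanity check on the VALID-padding layers in network_det, the sketch below traces the spatial size with the standard output-size formula; the 227x227 input resolution is a hypothetical choice for illustration, not taken from the project.

# Spatial-size trace for network_det (hypothetical 227x227 input).
# VALID -> (size - kernel) // stride + 1, SAME -> ceil(size / stride).
def out_size(size, kernel, stride, padding='VALID'):
    if padding == 'VALID':
        return (size - kernel) // stride + 1
    return -(-size // stride)

s = out_size(227, 11, 4)           # conv1 (VALID)      -> 55
s = out_size(s, 3, 2)              # max1               -> 27
s = out_size(s, 5, 1, 'SAME')      # conv2              -> 27
s = out_size(s, 3, 2)              # max2               -> 13
# conv3, conv4, conv5 use SAME padding with stride 1, so the size stays 13.
s = out_size(s, 3, 2)              # pool5              -> 6
print(s)                           # 6, so fc6 would see 6 * 6 * 256 = 9216 inputs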


Example 2: create_test_network_2

def create_test_network_2():
  """Aligned network for test.

  The graph corresponds to a variation to the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs. Layers 2 and 3 are changed
  to max-pooling operations. Since the functionality is the same as convolution,
  the network is aligned and the receptive field size is the same as from the
  network created using create_test_network_1().

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.max_pool2d(l2_pad, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
Developer: Ajaycs99, Project: tensorflow, Lines: 26, Source: receptive_field_test.py
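The docstring above reasons about receptive fields and alignment; a compact check with the standard chain formula (the receptive field grows by (kernel - 1) times the accumulated stride at each layer) is sketched below. This is general arithmetic, not code from the test itself.

# Receptive-field check for create_test_network_2.
def chain_rf(layers):
    """layers: list of (kernel_size, stride) along one branch."""
    rf, jump = 1, 1
    for k, s in layers:
        rf += (k - 1) * jump
        jump *= s
    return rf, jump

print(chain_rf([(3, 2), (1, 2)]))  # right branch L2 -> L3: rf 3, effective stride 4
print(chain_rf([(1, 4)]))          # left branch L1:        rf 1, effective stride 4
# Both branches reach the addition with the same effective stride (4), as alignment
# requires; the combined receptive field size is the larger of the two branches, i.e. 3.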


Example 3: localization_VGG16

	def localization_VGG16(self,inputs):

		with tf.variable_scope('localization_network'):
			with slim.arg_scope([slim.conv2d, slim.fully_connected],
								 activation_fn = tf.nn.relu,
								 weights_initializer = tf.constant_initializer(0.0)):
				
				net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
				net = slim.max_pool2d(net, [2, 2], scope='pool1')
				net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
				net = slim.max_pool2d(net, [2, 2], scope='pool2')
				net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
				net = slim.max_pool2d(net, [2, 2], scope='pool3')
				net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
				net = slim.max_pool2d(net, [2, 2], scope='pool4')
				net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
				net = slim.max_pool2d(net, [2, 2], scope='pool5')
				shape = int(np.prod(net.get_shape()[1:]))

				net = slim.fully_connected(tf.reshape(net, [-1, shape]), 4096, scope='fc6')
				net = slim.fully_connected(net, 1024, scope='fc7')
				identity = np.array([[1., 0., 0.],
									[0., 1., 0.]])
				identity = identity.flatten()
				net = slim.fully_connected(net, 6, biases_initializer = tf.constant_initializer(identity) , scope='fc8')
			
		return net
Developer: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 27, Source: model.py


Example 4: conv_net_kelz

def conv_net_kelz(inputs):
  """Builds the ConvNet from Kelz 2016."""
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      activation_fn=tf.nn.relu,
      weights_initializer=tf.contrib.layers.variance_scaling_initializer(
          factor=2.0, mode='FAN_AVG', uniform=True)):
    net = slim.conv2d(
        inputs, 32, [3, 3], scope='conv1', normalizer_fn=slim.batch_norm)

    net = slim.conv2d(
        net, 32, [3, 3], scope='conv2', normalizer_fn=slim.batch_norm)
    net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool2')
    net = slim.dropout(net, 0.25, scope='dropout2')

    net = slim.conv2d(
        net, 64, [3, 3], scope='conv3', normalizer_fn=slim.batch_norm)
    net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool3')
    net = slim.dropout(net, 0.25, scope='dropout3')

    # Flatten while preserving batch and time dimensions.
    dims = tf.shape(net)
    net = tf.reshape(net, (dims[0], dims[1],
                           net.shape[2].value * net.shape[3].value), 'flatten4')

    net = slim.fully_connected(net, 512, scope='fc5')
    net = slim.dropout(net, 0.5, scope='dropout5')

    return net
Developer: Alice-ren, Project: magenta, Lines: 29, Source: model.py
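Because both pools use a [1, 2] window and stride, only the last spatial axis shrinks and the time dimension survives the flatten. A hedged shape trace, taking F = 229 input bins purely as a hypothetical value:

# Shape trace for conv_net_kelz (F = 229 is illustrative, not taken from the paper).
# slim.max_pool2d defaults to VALID padding, so each [1, 2] pool maps F -> (F - 2) // 2 + 1.
F = 229
F = (F - 2) // 2 + 1   # pool2 -> 114
F = (F - 2) // 2 + 1   # pool3 -> 57
print(F, F * 64)       # flatten4 feeds fc5 with 57 * 64 = 3648 features per time step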


Example 5: localization_squeezenet

	def localization_squeezenet(self, inputs):

		with tf.variable_scope('localization_network'):	
			with slim.arg_scope([slim.conv2d], activation_fn = tf.nn.relu,
									padding = 'SAME',
									weights_initializer = tf.constant_initializer(0.0)):

				conv1 = slim.conv2d(inputs, 64, [3,3], 2, padding = 'VALID', scope='conv1')
				pool1 = slim.max_pool2d(conv1, [2,2], 2, scope='pool1')
				fire2 = self.fire_module(pool1, 16, 64, scope = 'fire2')
				fire3 = self.fire_module(fire2, 16, 64, scope = 'fire3', res_connection=True)
				fire4 = self.fire_module(fire3, 32, 128, scope = 'fire4')
				pool4 = slim.max_pool2d(fire4, [2,2], 2, scope='pool4')
				fire5 = self.fire_module(pool4, 32, 128, scope = 'fire5', res_connection=True)
				fire6 = self.fire_module(fire5, 48, 192, scope = 'fire6')
				fire7 = self.fire_module(fire6, 48, 192, scope = 'fire7', res_connection=True)
				fire8 = self.fire_module(fire7, 64, 256, scope = 'fire8')
				pool8 = slim.max_pool2d(fire8, [2,2], 2, scope='pool8')
				fire9 = self.fire_module(pool8, 64, 256, scope = 'fire9', res_connection=True)
				conv10 = slim.conv2d(fire9, 128, [1,1], 1, scope='conv10')
				shape = int(np.prod(conv10.get_shape()[1:]))
				identity = np.array([[1., 0., 0.],
									[0., 1., 0.]])
				identity = identity.flatten()
				fc11 = slim.fully_connected(tf.reshape(conv10, [-1, shape]), 6, biases_initializer = tf.constant_initializer(identity), scope='fc11')
		return fc11
Developer: dmehr, Project: HyperFace-TensorFlow-implementation, Lines: 26, Source: model.py


Example 6: build_arch_baseline

def build_arch_baseline(input, is_train: bool, num_classes: int):

    bias_initializer = tf.truncated_normal_initializer(
        mean=0.0, stddev=0.01)  # tf.constant_initializer(0.0)
    # The paper did not mention any regularization; a common L2 regularizer on the weights is added here
    weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)

    tf.logging.info('input shape: {}'.format(input.get_shape()))

    # weights_initializer=initializer,
    with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=is_train, biases_initializer=bias_initializer, weights_regularizer=weights_regularizer):
        with tf.variable_scope('relu_conv1') as scope:
            output = slim.conv2d(input, num_outputs=32, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer1')

            tf.logging.info('output shape: {}'.format(output.get_shape()))

        with tf.variable_scope('relu_conv2') as scope:
            output = slim.conv2d(output, num_outputs=64, kernel_size=[
                                 5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer2')

            tf.logging.info('output shape: {}'.format(output.get_shape()))

        output = slim.flatten(output)
        output = slim.fully_connected(output, 1024, scope='relu_fc3', activation_fn=tf.nn.relu)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        output = slim.dropout(output, 0.5, scope='dp')
        output = slim.fully_connected(output, num_classes, scope='final_layer', activation_fn=None)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        return output
Developer: lzqkean, Project: deep_learning, Lines: 32, Source: capsnet_em.py


Example 7: make_tower

 def make_tower(net):
     net = slim.conv2d(net, 20, [5, 5], padding='VALID', scope='conv1')
     net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool1')
     net = slim.conv2d(net, 50, [5, 5], padding='VALID', scope='conv2')
     net = slim.max_pool2d(net, [2, 2], padding='VALID', scope='pool2')
     net = slim.flatten(net)
     net = slim.fully_connected(net, 500, scope='fc1')
     net = slim.fully_connected(net, 2, activation_fn=None, scope='fc2')
     return net
Developer: Dasona, Project: DIGITS, Lines: 9, Source: siamese-TF.py


Example 8: build_graph

def build_graph(top_k):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag')
    with tf.device('/gpu:0'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training}):
            conv3_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv3_1')
            max_pool_1 = slim.max_pool2d(conv3_1, [2, 2], [2, 2], padding='SAME', scope='pool1')
            conv3_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv3_2')
            max_pool_2 = slim.max_pool2d(conv3_2, [2, 2], [2, 2], padding='SAME', scope='pool2')
            conv3_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3_3')
            max_pool_3 = slim.max_pool2d(conv3_3, [2, 2], [2, 2], padding='SAME', scope='pool3')
            conv3_4 = slim.conv2d(max_pool_3, 512, [3, 3], padding='SAME', scope='conv3_4')
            conv3_5 = slim.conv2d(conv3_4, 512, [3, 3], padding='SAME', scope='conv3_5')
            max_pool_4 = slim.max_pool2d(conv3_5, [2, 2], [2, 2], padding='SAME', scope='pool4')

            flatten = slim.flatten(max_pool_4)
            fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024,
                                       activation_fn=tf.nn.relu, scope='fc1')
            logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None,
                                          scope='fc2')
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            loss = control_flow_ops.with_dependencies([updates], loss)

        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
        probabilities = tf.nn.softmax(logits)

        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
        accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'is_training': is_training,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
Developer: oraSC, Project: Chinese-Character-Recognition, Lines: 57, Source: chinese_character_recognition_bn.py


Example 9: row_column_max_pooling

def row_column_max_pooling(bottom, prefix='', window=(7, 7)):
    column_mx = slim.max_pool2d(bottom, [window[0], 1],
        stride=[window[0], 1], scope=prefix + '_column_max')
    row_mx = slim.max_pool2d(bottom, [1, window[1]],
        stride=[1, window[1]], scope=prefix + '_row_max')

    column_mean = slim.avg_pool2d(column_mx, [1, window[1]],
        stride=[1, window[1]], scope=prefix + '_column_mean')
    row_mean = slim.avg_pool2d(row_mx, [window[0], 1],
        stride=[window[0], 1], scope=prefix + '_row_mean')

    return row_mean + column_mean
Developer: Zumbalamambo, Project: light_head_rcnn, Lines: 12, Source: rfcn_plus_plus_opr.py
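A hypothetical usage sketch, assuming TensorFlow 1.x with tf.contrib.slim imported as in the source project and the function above in scope; the shapes in the comments follow directly from the kernel and stride arguments:

import numpy as np
import tensorflow as tf

# Hypothetical call on a single 7x7 feature map with 10 channels.
feat = tf.constant(np.random.rand(1, 7, 7, 10), dtype=tf.float32)
pooled = row_column_max_pooling(feat, prefix='roi', window=(7, 7))
# column max: [1, 1, 7, 10]; row max: [1, 7, 1, 10];
# the two avg pools collapse both to [1, 1, 1, 10], and their sum is returned.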


Example 10: _build_network

  def _build_network(self, sess, is_training=True):
    with tf.variable_scope('vgg_16', 'vgg_16'):
      # select initializers
      if cfg.TRAIN.TRUNCATED:
        initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
      else:
        initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
        initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)

      net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],
                        trainable=False, scope='conv1')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                        trainable=False, scope='conv2')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                        trainable=is_training, scope='conv3')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv4')
      net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                        trainable=is_training, scope='conv5')
      self._act_summaries.append(net)
      self._layers['head'] = net
      # build the anchors for the image
      self._anchor_component()
      # region proposal network
      rois = self._region_proposal(net, is_training, initializer)
      # region of interest pooling
      if cfg.POOLING_MODE == 'crop':
        pool5 = self._crop_pool_layer(net, rois, "pool5")
      else:
        raise NotImplementedError

      pool5_flat = slim.flatten(pool5, scope='flatten')
      fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
      if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, scope='dropout6')
      fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
      if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, scope='dropout7')
      # region classification
      cls_prob, bbox_pred = self._region_classification(fc7, 
                                                        is_training, 
                                                        initializer, 
                                                        initializer_bbox)

      self._score_summaries.update(self._predictions)

      return rois, cls_prob, bbox_pred
Developer: deeplxx, Project: tf-faster-rcnn, Lines: 52, Source: vgg16.py


Example 11: construct_net

 def construct_net(self,is_trained = True):
     with slim.arg_scope([slim.conv2d], padding='VALID',
                         weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         weights_regularizer=slim.l2_regularizer(0.0005)):
         net = slim.conv2d(self.input_images,6,[5,5],1,padding='SAME',scope='conv1')
         net = slim.max_pool2d(net, [2, 2], scope='pool2')
         net = slim.conv2d(net,16,[5,5],1,scope='conv3')
         net = slim.max_pool2d(net, [2, 2], scope='pool4')
         net = slim.conv2d(net,120,[5,5],1,scope='conv5')
         net = slim.flatten(net, scope='flat6')
         net = slim.fully_connected(net, 84, scope='fc7')
         net = slim.dropout(net, self.dropout,is_training=is_trained, scope='dropout8')
         digits = slim.fully_connected(net, 10, scope='fc9')
     return digits
Developer: dcrmg, Project: LeNet, Lines: 14, Source: lenet.py


Example 12: network

def network(inputs):
    '''Define the network'''
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                      weights_regularizer=slim.l2_regularizer(0.0005)):
        net = tf.reshape(inputs,[-1,FLAGS.im_size ,FLAGS.im_size,3])
        net = slim.conv2d(net, 32, [3,3], scope='conv1')
        net = slim.max_pool2d(net, [4,4], scope = 'conv1')
        net = slim.conv2d(net,128,[3,3], scope = 'conv2')
        net = slim.max_pool2d(net,[4,4], scope = 'pool2')
        net = slim.flatten(net)
        net = slim.fully_connected(net,64, scope = 'fc')
        net = slim.fully_connected(net, n_classes, activation_fn = None, scope = 'output')
    return net
Developer: lun5, Project: tissue-component-classification, Lines: 15, Source: task.py


Example 13: create_test_network

def create_test_network():
  """Convolutional neural network for test.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  return g
Developer: Ajaycs99, Project: tensorflow, Lines: 27, Source: graph_compute_order_test.py


Example 14: AddMaxPool

  def AddMaxPool(self, prev_layer, index):
    """Add a maxpool layer.

    Args:
      prev_layer: Input tensor.
      index:      Position in model_str to start parsing

    Returns:
      Output tensor, end index in model_str.
    """
    pattern = re.compile(R'(Mp)({\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?')
    m = pattern.match(self.model_str, index)
    if m is None:
      return None, None
    name = self._GetLayerName(m.group(0), index, m.group(2))
    height = int(m.group(3))
    width = int(m.group(4))
    y_stride = height if m.group(5) is None else m.group(5)
    x_stride = width if m.group(6) is None else m.group(6)
    self.reduction_factors[1] *= y_stride
    self.reduction_factors[2] *= x_stride
    return slim.max_pool2d(
        prev_layer, [height, width], [y_stride, x_stride],
        padding='SAME',
        scope=name), m.end()
Developer: Peratham, Project: models, Lines: 25, Source: vgslspecs.py
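The Mp spec syntax parsed above can be exercised on its own. The snippet below reuses the same regular expression; the spec string 'Mp3,3,2,2' is a made-up example, not taken from the project:

import re

# Same pattern as in AddMaxPool: Mp<height>,<width>[,<y_stride>,<x_stride>] with an optional {name}.
pattern = re.compile(r'(Mp)({\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?')
m = pattern.match('Mp3,3,2,2')
print(m.group(3), m.group(4), m.group(5), m.group(6))  # -> 3 3 2 2
# When the stride groups are omitted (e.g. 'Mp2,2'), groups 5 and 6 are None and
# the window size doubles as the stride.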


Example 15: inference

 def inference(self):
     x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
     # scale (divide by MNIST std)
     x = x * 0.0125
     with slim.arg_scope([slim.conv2d, slim.fully_connected],
                         weights_initializer=tf.contrib.layers.xavier_initializer(),
                         weights_regularizer=slim.l2_regularizer(0.0005)):
         model = slim.conv2d(x, 20, [5, 5], padding='VALID', scope='conv1')
         model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool1')
         model = slim.conv2d(model, 50, [5, 5], padding='VALID', scope='conv2')
         model = slim.max_pool2d(model, [2, 2], padding='VALID', scope='pool2')
         model = slim.flatten(model)
         model = slim.fully_connected(model, 500, scope='fc1')
         model = slim.dropout(model, 0.5, is_training=self.is_training, scope='do1')
         model = slim.fully_connected(model, self.nclasses, activation_fn=None, scope='fc2')
         return model
Developer: Dasona, Project: DIGITS, Lines: 16, Source: lenet.py


Example 16: _build_base

  def _build_base(self):
    with tf.variable_scope(self._scope, self._scope):
      net = resnet_utils.conv2d_same(self._image, 64, 7, stride=2, scope='conv1')
      net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
      net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')

    return net
Developer: StanislawAntol, Project: tf-faster-rcnn, Lines: 7, Source: resnet_v1.py
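The explicit tf.pad followed by a 3x3, stride-2 VALID pool is a common ResNet idiom; the quick arithmetic check below (general formulas, not project code) shows it yields the same output size as SAME pooling while making the one-pixel padding symmetric and explicit:

# Pad height h by 1 pixel on each side, then 3x3 / stride-2 VALID pooling, versus plain SAME pooling.
for h in (224, 600, 801):
    out_padded_valid = (h + 2 - 3) // 2 + 1
    out_same = -(-h // 2)                  # ceil(h / 2)
    print(h, out_padded_valid, out_same)   # the two sizes agree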


Example 17: build_feature_pyramid

    def build_feature_pyramid(self):

        '''
        reference: https://github.com/CharlesShang/FastMaskRCNN
        build P2, P3, P4, P5, P6
        :return: multi-scale feature map
        '''

        feature_pyramid = {}
        with tf.variable_scope('feature_pyramid'):
            with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(self.rpn_weight_decay)):
                feature_pyramid['P5'] = slim.conv2d(self.feature_maps_dict['C5'],
                                                    num_outputs=256,
                                                    kernel_size=[1, 1],
                                                    stride=1,
                                                    scope='build_P5')

                feature_pyramid['P6'] = slim.max_pool2d(feature_pyramid['P5'],
                                                        kernel_size=[2, 2], stride=2, scope='build_P6')
                # P6 is a downsample of P5

                for layer in range(4, 1, -1):
                    p, c = feature_pyramid['P' + str(layer + 1)], self.feature_maps_dict['C' + str(layer)]
                    up_sample_shape = tf.shape(c)
                    up_sample = tf.image.resize_nearest_neighbor(p, [up_sample_shape[1], up_sample_shape[2]],
                                                                 name='build_P%d/up_sample_nearest_neighbor' % layer)

                    c = slim.conv2d(c, num_outputs=256, kernel_size=[1, 1], stride=1,
                                    scope='build_P%d/reduce_dimension' % layer)
                    p = up_sample + c
                    p = slim.conv2d(p, 256, kernel_size=[3, 3], stride=1,
                                    padding='SAME', scope='build_P%d/avoid_aliasing' % layer)
                    feature_pyramid['P' + str(layer)] = p

        return feature_pyramid
Developer: mbossX, Project: RRPN_FPN_Tensorflow, Lines: 35, Source: build_rpn.py


Example 18: create_test_network

def create_test_network():
  """Convolutional neural network for test.

  Returns:
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  name_to_node = graph_compute_order.parse_graph_nodes(g.as_graph_def())
  return name_to_node
Developer: Ajaycs99, Project: tensorflow, Lines: 29, Source: parse_layer_parameters_test.py


Example 19: build_backbones

 def build_backbones(self):
     inputs = self.inputs
     with slim.arg_scope([slim.conv2d, slim.fully_connected],
                         padding='SAME', weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
                         weights_regularizer=slim.l2_regularizer(0.0005),
                         activation_fn=tf.nn.relu):
         net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
         net = slim.max_pool2d(net, [2, 2], scope='pool1')
         net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
         net = slim.max_pool2d(net, [2, 2], scope='pool2')
         net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
         net = slim.max_pool2d(net, [2, 2], scope='pool3')
         net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
         net = slim.max_pool2d(net, [2, 2], scope='pool4')
         net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
         net = slim.max_pool2d(net, [2, 2], scope='pool5')
         self.vgg_head = net
Developer: jacke121, Project: tf_rfcn, Lines: 17, Source: vgg16_rfcn.py


Example 20: build_model

    def build_model(self, input_image, center_map, batch_size):
        self.batch_size = batch_size
        self.input_image = input_image
        self.center_map = center_map
        with tf.variable_scope('pooled_center_map'):
            # the center map is a Gaussian template that gathers the response
            self.center_map = slim.avg_pool2d(self.center_map,
                                              [9, 9], stride=8,
                                              padding='SAME',
                                              scope='center_map')

        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            with tf.variable_scope('sub_stages'):
                net = slim.conv2d(input_image, 64, [3, 3], scope='sub_conv1')
                net = slim.conv2d(net, 64, [3, 3], scope='sub_conv2')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool1')
                net = slim.conv2d(net, 128, [3, 3], scope='sub_conv3')
                net = slim.conv2d(net, 128, [3, 3], scope='sub_conv4')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool2')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv5')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv6')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv7')
                net = slim.conv2d(net, 256, [3, 3], scope='sub_conv8')
                net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='sub_pool3')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv9')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv10')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv11')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv12')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv13')
                net = slim.conv2d(net, 512, [3, 3], scope='sub_conv14')

                self.sub_stage_img_feature = slim.conv2d(net, 128, [3, 3],
                                                         scope='sub_stage_img_feature')

            with tf.variable_scope('stage_1'):
                conv1 = slim.conv2d(self.sub_stage_img_feature, 512, [1, 1],
                                    scope='conv1')
                self.stage_heatmap.append(slim.conv2d(conv1, self.joints, [1, 1],
                                                      scope='stage_heatmap'))

            for stage in range(2, self.stages + 1):
                self._middle_conv(stage)
Developer: HumbleBee14, Project: convolutional-pose-machines-tensorflow, Lines: 45, Source: cpm_hand_slim.py



Note: The tensorflow.contrib.slim.max_pool2d examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before distributing or reusing the code. Do not republish this article without permission.

