
Python layers.conv2d Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.contrib.layers.conv2d function in Python. If you are wondering what conv2d does, how to call it, or what real-world uses look like, the curated code examples below should help.



Twenty code examples of the conv2d function are shown below, sorted by popularity.
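Before the examples, a minimal usage sketch of the function itself may help. It assumes TensorFlow 1.x with the contrib API available; the input shape and filter count are illustrative only.

import tensorflow as tf
from tensorflow.contrib import layers

# NHWC input: a batch of 28x28 grayscale images
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
# 32 filters of size 3x3; contrib defaults are stride=1, padding='SAME'
# and activation_fn=tf.nn.relu
y = layers.conv2d(x, num_outputs=32, kernel_size=[3, 3], scope='conv1')
print(y.get_shape())  # (?, 28, 28, 32): 'SAME' keeps the spatial size at stride 1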

Example 1: conv2d_same

def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding.

  When stride > 1, then we do explicit zero-padding, followed by conv2d with
  'VALID' padding.

  Note that

     net = conv2d_same(inputs, num_outputs, 3, stride=stride)

  is equivalent to

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
     padding='SAME')
     net = subsample(net, factor=stride)

  whereas

     net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
     padding='SAME')

  is different when the input's height or width is even, which is why we add the
  current function. For more details, see ResnetUtilsTest.testConv2DSameEven().

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """
  if stride == 1:
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        rate=rate,
        padding='SAME',
        scope=scope)
  else:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = array_ops.pad(
        inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return layers_lib.conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=stride,
        rate=rate,
        padding='VALID',
        scope=scope)
Author: LUTAN | Project: tensorflow | Lines: 60 | Source: resnet_utils.py
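The docstring above hinges on the difference between fixed, symmetric padding and TensorFlow's input-dependent 'SAME' padding. A small sketch of the arithmetic (the helper name is ours, not TensorFlow's; the values follow the code above):

def same_pad_amounts(kernel_size, rate=1):
    # mirrors the padding computation in conv2d_same above
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    return pad_total // 2, pad_total - pad_total // 2

print(same_pad_amounts(3))  # (1, 1): fixed padding, independent of input size
# 'SAME' padding with stride=2 pads an even-sized input asymmetrically
# (e.g. (0, 1) for height 4 with a 3x3 kernel), so the convolution windows
# land at different positions; ResnetUtilsTest.testConv2DSameEven() checks
# exactly this discrepancy.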


Example 2: conv_model

def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    # to deactivate dropout on the dense layer, set keep_prob=1
    Y5d = layers.dropout(Y5, keep_prob=0.75, noise_shape=None, is_training=mode==learn.ModeKeys.TRAIN)
    Ylogits = layers.linear(Y5d, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)

    loss = conv_model_loss(Ylogits, Y_, mode)
    train_op = conv_model_train_op(loss, mode)
    eval_metrics = conv_model_eval_metrics(classes, Y_, mode)

    return learn.ModelFnOps(
        mode=mode,
        # You can name the fields of your predictions dictionary as you like.
        predictions={"predictions": predict, "classes": classes},
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metrics
    )
Author: spwcd | Project: QTML | Lines: 26 | Source: task.py
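conv_model has the (features, targets, mode) signature of a tf.contrib.learn model_fn, so it plugs directly into an Estimator. A hedged sketch (conv_model_loss, conv_model_train_op and conv_model_eval_metrics are defined elsewhere in task.py; the data variables and model_dir are hypothetical):

from tensorflow.contrib import learn

estimator = learn.Estimator(model_fn=conv_model, model_dir='/tmp/mnist_conv')
# estimator.fit(x=train_images, y=train_labels, steps=1000)
# estimator.evaluate(x=test_images, y=test_labels)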


Example 3: discriminator_stego_nn

    def discriminator_stego_nn(self, img, reuse=False):
        with tf.variable_scope('S_network'):

            if reuse:
                tf.get_variable_scope().reuse_variables()

            net = img
            net = self.image_processing_layer(img)
            net = self.batch_norm(net, scope='d_s_bn0')
            net = conv2d(net, self.df_dim, kernel_size=[5, 5], stride=[2, 2],
                         activation_fn=self.leaky_relu, scope='d_s_h0_conv')

            net = self.batch_norm(net, scope='d_s_bn1')
            net = conv2d(net, self.df_dim * 2, kernel_size=[5, 5], stride=[2, 2],
                         activation_fn=self.leaky_relu, scope='d_s_h1_conv')

            net = self.batch_norm(net, scope='d_s_bn2')
            net = conv2d(net, self.df_dim * 4, kernel_size=[5, 5], stride=[2, 2],
                         activation_fn=self.leaky_relu, scope='d_s_h2_conv')

            net = self.batch_norm(net, scope='d_s_bn3')
            net = conv2d(net, self.df_dim * 8, kernel_size=[5, 5], stride=[2, 2],
                         activation_fn=self.leaky_relu, scope='d_s_h3_conv')

            net = self.batch_norm(net, scope='d_s_bn4')

            net = tf.reshape(net, [self.conf.batch_size, -1])
            net = linear(net, 1, activation_fn=tf.nn.sigmoid, scope='d_s_h4_lin',
                         weights_initializer=tf.random_normal_initializer(stddev=0.02))

            return net
Author: dvolkhonskiy | Project: Adversarial-Model-For-Steganography | Lines: 31 | Source: sgan.py


Example 4: vgg_a

def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
Author: 1000sprites | Project: tensorflow | Lines: 60 | Source: vgg.py
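A hedged usage sketch, tracing why fc6 is a 7x7 VALID convolution (shapes assume the 224x224 classification setting mentioned in the docstring):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = vgg_a(images, num_classes=1000, is_training=False)
# 224 halves through five 2x2 max-pools to a 7x7x512 map after pool5, so the
# 7x7 VALID conv in 'fc6' collapses it to 1x1x4096; 'fc8' then yields
# [batch, 1, 1, 1000], squeezed to [batch, 1000] when spatial_squeeze=True.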


Example 5: bottleneck

def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    preact = layers.batch_norm(
        inputs, activation_fn=nn_ops.relu, scope='preact')
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = layers_lib.conv2d(
          preact,
          depth, [1, 1],
          stride=stride,
          normalizer_fn=None,
          activation_fn=None,
          scope='shortcut')

    residual = layers_lib.conv2d(
        preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
    residual = resnet_utils.conv2d_same(
        residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
    residual = layers_lib.conv2d(
        residual,
        depth, [1, 1],
        stride=1,
        normalizer_fn=None,
        activation_fn=None,
        scope='conv3')

    output = shortcut + residual

    return utils.collect_named_outputs(outputs_collections, sc.name, output)
Author: 1000sprites | Project: tensorflow | Lines: 59 | Source: resnet_v2.py
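A hedged sketch of calling the unit directly; in practice resnet_v2 composes it into blocks. Choosing the input depth equal to depth makes the shortcut a plain subsample (an identity at stride 1):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [1, 56, 56, 256])
# bottleneck path: 1x1 (64) -> 3x3 (64, via conv2d_same) -> 1x1 (256),
# applied to the batch-normalized, ReLU-preactivated input
output = bottleneck(inputs, depth=256, depth_bottleneck=64, stride=1)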


Example 6: build_atari

def build_atari(minimap, screen, info, msize, ssize, num_action):
  # Extract features
  mconv1 = layers.conv2d(tf.transpose(minimap, [0, 2, 3, 1]),
                         num_outputs=16,
                         kernel_size=8,
                         stride=4,
                         scope='mconv1')
  mconv2 = layers.conv2d(mconv1,
                         num_outputs=32,
                         kernel_size=4,
                         stride=2,
                         scope='mconv2')
  sconv1 = layers.conv2d(tf.transpose(screen, [0, 2, 3, 1]),
                         num_outputs=16,
                         kernel_size=8,
                         stride=4,
                         scope='sconv1')
  sconv2 = layers.conv2d(sconv1,
                         num_outputs=32,
                         kernel_size=4,
                         stride=2,
                         scope='sconv2')
  info_fc = layers.fully_connected(layers.flatten(info),
                                   num_outputs=256,
                                   activation_fn=tf.tanh,
                                   scope='info_fc')

  # Compute spatial actions, non spatial actions and value
  feat_fc = tf.concat([layers.flatten(mconv2), layers.flatten(sconv2), info_fc], axis=1)
  feat_fc = layers.fully_connected(feat_fc,
                                   num_outputs=256,
                                   activation_fn=tf.nn.relu,
                                   scope='feat_fc')

  spatial_action_x = layers.fully_connected(feat_fc,
                                            num_outputs=ssize,
                                            activation_fn=tf.nn.softmax,
                                            scope='spatial_action_x')
  spatial_action_y = layers.fully_connected(feat_fc,
                                            num_outputs=ssize,
                                            activation_fn=tf.nn.softmax,
                                            scope='spatial_action_y')
  spatial_action_x = tf.reshape(spatial_action_x, [-1, 1, ssize])
  spatial_action_x = tf.tile(spatial_action_x, [1, ssize, 1])
  spatial_action_y = tf.reshape(spatial_action_y, [-1, ssize, 1])
  spatial_action_y = tf.tile(spatial_action_y, [1, 1, ssize])
  spatial_action = layers.flatten(spatial_action_x * spatial_action_y)

  non_spatial_action = layers.fully_connected(feat_fc,
                                              num_outputs=num_action,
                                              activation_fn=tf.nn.softmax,
                                              scope='non_spatial_action')
  value = tf.reshape(layers.fully_connected(feat_fc,
                                            num_outputs=1,
                                            activation_fn=None,
                                            scope='value'), [-1])

  return spatial_action, non_spatial_action, value
Author: fanyp17 | Project: pysc2-agents | Lines: 58 | Source: network.py
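The spatial-action head above builds a joint distribution over ssize*ssize screen positions as the outer product of two independent 1-D softmaxes. A small NumPy sketch with hypothetical values:

import numpy as np

px = np.array([0.2, 0.5, 0.3])      # softmax over x, ssize = 3
py = np.array([0.1, 0.6, 0.3])      # softmax over y
joint = py[:, None] * px[None, :]   # outer product, shape (3, 3)
print(joint.sum())                  # 1.0: still a valid probability distribution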


Example 7: conv_model

def conv_model(X, Y_):
   XX = tf.reshape(X, [-1, 28, 28, 1])
   Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6])
   Y2 = layers.conv2d(Y1,  num_outputs=12, kernel_size=[5, 5], stride=2)
   Y3 = layers.conv2d(Y2,  num_outputs=24, kernel_size=[4, 4], stride=2)
   Y4 = layers.flatten(Y3)
   Y5 = layers.relu(Y4, 200)
   Ylogits = layers.linear(Y5, 10)
   predict = tf.nn.softmax(Ylogits)

   classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
   loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=tf.one_hot(Y_, 10)))
   train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.003, "Adam")
   return {"predictions":predict, "classes": classes}, loss, train_op
Author: pooneetthaper | Project: stuff | Lines: 14 | Source: hlversion.py


Example 8: conv_model

def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    Ylogits = layers.linear(Y5, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=tf.one_hot(Y_, 10)))*100
    train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.001, "Adam")
    return {"predictions":predict, "classes": classes}, loss, train_op
Author: laventura | Project: tensorflow-mnist-tutorial | Lines: 14 | Source: task.py


Example 9: resBlock

def resBlock(x, num_outputs, kernel_size = 4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, scope=None):
    assert num_outputs % 2 == 0  # num_outputs must be divisible by the channel factor (2 here)
    with tf.variable_scope(scope, 'resBlock'):
        shortcut = x
        if stride != 1 or x.get_shape()[3] != num_outputs:
            shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride, 
                        activation_fn=None, normalizer_fn=None, scope='shortcut')
        x = tcl.conv2d(x, num_outputs // 2, kernel_size=1, stride=1, padding='SAME')
        x = tcl.conv2d(x, num_outputs // 2, kernel_size=kernel_size, stride=stride, padding='SAME')
        x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)

        x += shortcut       
        x = normalizer_fn(x)
        x = activation_fn(x)
    return x
Author: adong7639 | Project: PRNet | Lines: 15 | Source: predictor.py
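A hedged usage sketch stacking two of these blocks (the 64x64x32 input is illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64, 64, 32])
h = resBlock(x, num_outputs=32, scope='res1')            # identity shortcut
h = resBlock(h, num_outputs=64, stride=2, scope='res2')  # 1x1 conv shortcut, downsamples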


Example 10: __call__

	def __call__(self, x, reuse=False):
		with tf.variable_scope(self.name) as scope:
			if reuse:
				scope.reuse_variables()
			size = 64
			shared = tcl.conv2d(x, num_outputs=size, kernel_size=5, # bzx28x28x1 -> bzx14x14x64
						stride=2, activation_fn=tf.nn.relu)
			shared = tcl.conv2d(shared, num_outputs=size * 2, kernel_size=5, # 7x7x128
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			shared = tcl.fully_connected(tcl.flatten( # reshape, 1
						shared), 1024, activation_fn=tf.nn.relu)
			
			#c = tcl.fully_connected(shared, 128, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			c = tcl.fully_connected(shared, 10, activation_fn=None) # 10 classes
			return c
Author: 1202kbs | Project: GAN | Lines: 15 | Source: nets.py


Example 11: __call__

	def __call__(self, x, reuse=True):
		with tf.variable_scope(self.name) as scope:
			if reuse:
				scope.reuse_variables()
			size = 64
			d = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
						stride=2, activation_fn=lrelu)
			d = tcl.conv2d(d, num_outputs=size * 2, kernel_size=3, # 16x16x128
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			d = tcl.conv2d(d, num_outputs=size * 4, kernel_size=3, # 8x8x256
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			d = tcl.fully_connected(tcl.flatten( # reshape, 1
						d), 1, activation_fn=None)
			return d
Author: 1202kbs | Project: GAN | Lines: 16 | Source: nets.py


Example 12: get_bottlenet

    def get_bottlenet(self):
        """Form the network"""
        # get collections
        bottlelayer = namedtuple("bottlelayer",
                              ['kernel_shape','stride','bn_flag','padding','act_fn'])
        with tf.name_scope(self.scope):
            input_now = self.inputs
            for i, kernel in enumerate(self.bottle_params):
                with tf.name_scope('bottle_sub' + str(i)):
                    kernel = bottlelayer._make(kernel)
                    with tf.name_scope('conv2d'):
                        residual = conv2d(
                            inputs=input_now,
                            num_outputs=kernel.kernel_shape[-1],
                            kernel_size=kernel.kernel_shape[0:2],
                            padding=kernel.padding,
                            stride=kernel.stride,
                            )
                    if kernel.bn_flag:
                        residual = utils.get_batch_norm(residual,
                                                        self.is_training,
                                                        scope='batch_norm')
                    if kernel.act_fn is not None:
                        with tf.name_scope('activate'):
                            residual = kernel.act_fn(residual)
                    input_now = residual
            # add shortcut
            self.get_shortcut(self.stride,scope=self.scope+'_shortcut')
            residual = residual + self.shortcut
            if self.summary_flag:
                tf.summary.histogram('bottle_residual', residual)

        return residual
Author: praveenkumarchandaliya | Project: res_ae | Lines: 33 | Source: Bottleneck.py


Example 13: autoencoder

def autoencoder(inputs):
    # encoder
    # 32 x 32 x 1   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  8 x 8 x 16
    # 8 x 8 x 16    ->  2 x 2 x 8
    net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME')
    # decoder
    # 2 x 2 x 8    ->  8 x 8 x 16
    # 8 x 8 x 16   ->  16 x 16 x 32
    # 16 x 16 x 32  ->  32 x 32 x 1
    net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME')
    net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME')
    net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh)
    return net
Author: AbsarF | Project: machine-learning-course | Lines: 16 | Source: ae.py
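A hedged training-setup sketch for the autoencoder above (the placeholder shape matches the 32x32x1 comments; the loss and learning rate are illustrative, not from ae.py):

import tensorflow as tf

ae_inputs = tf.placeholder(tf.float32, [None, 32, 32, 1])
ae_outputs = autoencoder(ae_inputs)
loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs))  # pixel-wise MSE
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)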


Example 14: get_shortcut

    def get_shortcut(self, stride, scope='shortcut'):
        """Reshape and repeat to get the shortcut of input

        Reference
        =========
        [1] TensorFlow 实战 (TensorFlow in Practice)
        """
        def subsample(inputs, factor, scope):
            if factor == 1:
                return inputs
            else:
                # avg for auto encoder
                return avg_pool2d(inputs,[1,1],
                                  stride=factor,
                                  padding='SAME',
                                  scope=scope)
        if self.depth_in == self.depth_out:
            self.shortcut = subsample(self.inputs, stride, scope)
        else:
            self.shortcut = conv2d(
                inputs=self.inputs,
                num_outputs=self.depth_out,
                kernel_size=[1,1],
                stride=stride,
                padding='SAME',
                normalizer_fn=None,
                activation_fn=None,
                scope=scope)
Author: praveenkumarchandaliya | Project: res_ae | Lines: 28 | Source: Bottleneck_en.py


Example 15: conv2d

def conv2d(input, filters=16, kernel_size=3, padding="same", stride=1, activation_fn=relu, reuse=None, name=None, data_format='NHWC',
           weights_initializer=xavier_initializer(), biases_initializer=tf.zeros_initializer(), weights_regularizer=None, biases_regularizer=None,
           normalizer_fn=None, normalizer_params=None):

   return contrib_layers.conv2d(input, num_outputs=filters, kernel_size=kernel_size, padding=padding, stride=stride, scope=name, data_format=data_format,
                        activation_fn=activation_fn, reuse=reuse, weights_initializer=weights_initializer, biases_initializer=biases_initializer,
                        weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params)
Author: Tiyanak | Project: lip-reading | Lines: 7 | Source: layers.py
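The wrapper simply renames contrib's num_outputs/scope to the Keras-style filters/name and defaults padding to "same". A hedged usage sketch:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
y = conv2d(x, filters=32, kernel_size=3, stride=2, name='conv1')  # -> (?, 32, 32, 32)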


Example 16: inception_v1

def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1'):
  """Defines the Inception V1 architecture.

  This architecture is defined in:

    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v1_base(inputs, scope=scope)
      with variable_scope.variable_scope('Logits'):
        net = layers_lib.avg_pool2d(
            net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

        end_points['Logits'] = logits
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
Author: 1000sprites | Project: tensorflow | Lines: 59 | Source: inception_v1.py
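A hedged usage sketch (inception_v1_base is defined in the same inception_v1.py and is assumed importable; 224x224 is the default training size noted in the docstring):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = inception_v1(images, num_classes=1000, is_training=False)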


Example 17: conv_model

def conv_model(features, target):
    target = tf.one_hot(target, 10, 1.0, 0.0)
    features = tf.expand_dims(features, 3)
    features = tf.reduce_max(layers.conv2d(features, 12, [3, 3]), [1, 2])
    features = tf.reshape(features, [-1, 12])
    prediction, loss = learn.models.logistic_regression(features, target)
    train_op = layers.optimize_loss(loss, tf.contrib.framework.get_global_step(), optimizer="SGD", learning_rate=0.01)
    return tf.argmax(prediction, dimension=1), loss, train_op
Author: ilblackdragon | Project: tf_examples | Lines: 8 | Source: digits.py


Example 18: testConv2DSameOdd

  def testConv2DSameOdd(self):
    n, n2 = 5, 3

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = array_ops.reshape(w, [3, 3, 1, 1])

    variable_scope.get_variable('Conv/weights', initializer=w)
    variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
    variable_scope.get_variable_scope().reuse_variables()

    y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = math_ops.cast([[14, 28, 43, 58, 34],
                                 [28, 48, 66, 84, 46],
                                 [43, 66, 84, 102, 55],
                                 [58, 84, 102, 120, 64],
                                 [34, 46, 55, 64, 30]],
                                dtypes.float32)
    y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = math_ops.cast([[14, 43, 34],
                                 [43, 84, 55],
                                 [34, 55, 30]],
                                dtypes.float32)
    y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
Author: Albert-Z-Guo | Project: tensorflow | Lines: 42 | Source: resnet_v1_test.py


Example 19: network

    def network(self):
        net = self.images

        net = self.image_processing_layer(net)

        def get_init():
            return tf.truncated_normal_initializer(stddev=0.02)

        net = conv2d(net, 10, [7, 7], activation_fn=tf.nn.relu, scope='conv1', weights_initializer=get_init())
        net = conv2d(net, 20, [5, 5], activation_fn=tf.nn.relu, scope='conv2', weights_initializer=get_init())
        net = tf.nn.max_pool(net, [1, 4, 4, 1], [1, 1, 1, 1], padding='SAME')

        net = conv2d(net, 30, [3, 3], activation_fn=tf.nn.relu, scope='conv3', weights_initializer=get_init())
        net = conv2d(net, 40, [3, 3], activation_fn=tf.nn.relu, scope='conv4', weights_initializer=get_init())

        net = tf.nn.max_pool(net, [1, 2, 2, 1], [1, 1, 1, 1], padding='SAME')

        net = tf.reshape(net, [self.conf.batch_size, -1])

        net = linear(net, 100, activation_fn=tf.nn.tanh, scope='FC1')
        out = linear(net, 2, activation_fn=tf.nn.softmax, scope='out')
        return out
Author: dvolkhonskiy | Project: Adversarial-Model-For-Steganography | Lines: 22 | Source: steganalyzer.py


Example 20: conv_layer

def conv_layer(net, filters=32, hyperparameter=False, activation=tf.nn.relu,
               stride=1, max_pool=True, var_coll=far.HYPERPARAMETERS_COLLECTIONS,
               conv_initialization=tf.contrib.layers.xavier_initializer_conv2d(tf.float32)):
    max_pool_stride = [1, 2, 2, 1]

    bn = lambda _inp: tcl.batch_norm(_inp, variables_collections=var_coll)

    net + tcl.conv2d(net.out, num_outputs=filters, stride=stride,
                     kernel_size=3, normalizer_fn=bn, activation_fn=None,
                     trainable=not hyperparameter,
                     variables_collections=var_coll, weights_initializer=conv_initialization)
    net + activation(net.out)
    if max_pool:
        net + tf.nn.max_pool(net.out, max_pool_stride, max_pool_stride, 'VALID')
Author: codealphago | Project: FAR-HO | Lines: 14 | Source: hyper_representation.py



Note: The tensorflow.contrib.layers.conv2d examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by many authors; copyright remains with the original authors, and distribution and use are governed by each project's license. Please do not reproduce without permission.

