
Python dnn.dnn_pool Function Code Examples


This article collects typical usage examples of the Python function theano.sandbox.cuda.dnn.dnn_pool. If you are wondering exactly how dnn_pool is used, what its arguments look like, or what calling it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the dnn_pool function, sorted by popularity by default.
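
If you just need the basic call pattern, here is a minimal sketch based on the signatures used in the examples below (it assumes an old GPU-enabled Theano installation with cuDNN available; the variable names are illustrative):

    import theano
    import theano.tensor as T
    from theano.sandbox.cuda import dnn

    x = T.ftensor4('x')  # (batch, channels, rows, cols)
    # 2x2 max pooling with stride 2 and no padding halves rows and cols
    y = dnn.dnn_pool(x, ws=(2, 2), stride=(2, 2), mode='max', pad=(0, 0))
    f = theano.function([x], y)  # compiling this requires device=gpu and cuDNN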

Example 1: feature_extractor

    def feature_extractor(input_data):
        # conv stage 0 (64x64=>32x32)
        h0_0 = dnn_conv(input_data, conv_w0_0, border_mode=(1, 1)) + conv_b0_0.dimshuffle("x", 0, "x", "x")
        h0_1 = dnn_conv(relu(h0_0), conv_w0_1, border_mode=(1, 1)) + conv_b0_1.dimshuffle("x", 0, "x", "x")
        h0 = dnn_pool(relu(h0_1), ws=(2, 2), stride=(2, 2))
        # conv stage 1 (32x32=>16x16)
        h1_0 = dnn_conv(h0, conv_w1_0, border_mode=(1, 1)) + conv_b1_0.dimshuffle("x", 0, "x", "x")
        h1_1 = dnn_conv(relu(h1_0), conv_w1_1, border_mode=(1, 1)) + conv_b1_1.dimshuffle("x", 0, "x", "x")
        h1 = dnn_pool(relu(h1_1), ws=(2, 2), stride=(2, 2))
        # conv stage 2 (16x16=>8x8)
        h2_0 = dnn_conv(h1, conv_w2_0, border_mode=(1, 1)) + conv_b2_0.dimshuffle("x", 0, "x", "x")
        h2_1 = dnn_conv(relu(h2_0), conv_w2_1, border_mode=(1, 1)) + conv_b2_1.dimshuffle("x", 0, "x", "x")
        h2_2 = dnn_conv(relu(h2_1), conv_w2_2, border_mode=(1, 1)) + conv_b2_2.dimshuffle("x", 0, "x", "x")
        h2 = dnn_pool(relu(h2_2), ws=(2, 2), stride=(2, 2))
        # conv stage 3 (8x8=>4x4)
        h3_0 = dnn_conv(h2, conv_w3_0, border_mode=(1, 1)) + conv_b3_0.dimshuffle("x", 0, "x", "x")
        h3_1 = dnn_conv(relu(h3_0), conv_w3_1, border_mode=(1, 1)) + conv_b3_1.dimshuffle("x", 0, "x", "x")
        h3_2 = dnn_conv(relu(h3_1), conv_w3_2, border_mode=(1, 1)) + conv_b3_2.dimshuffle("x", 0, "x", "x")
        h3 = dnn_pool(relu(h3_2), ws=(2, 2), stride=(2, 2))
        # conv stage 4 (4x4=>2x2)
        h4_0 = dnn_conv(h3, conv_w4_0, border_mode=(1, 1)) + conv_b4_0.dimshuffle("x", 0, "x", "x")
        h4_1 = dnn_conv(relu(h4_0), conv_w4_1, border_mode=(1, 1)) + conv_b4_1.dimshuffle("x", 0, "x", "x")
        h4_2 = dnn_conv(relu(h4_1), conv_w4_2, border_mode=(1, 1)) + conv_b4_2.dimshuffle("x", 0, "x", "x")
        h4 = dnn_pool(relu(h4_2), ws=(2, 2), stride=(2, 2))

        return T.flatten(h4, 2)
Author: taesupkim, Project: dcgan_code, Lines: 26, Source: vae_vgg_moment_match_face_0.py


Example 2: dnn_pool3d2d

def dnn_pool3d2d(inputs, pool_shape, pool_stride, image_shape, mode='max'):
    """ Pool first all time-slices, so 2d-poolings over width and height.
    Then do a 1dpooling over the time (done as fake2d pooling with pooling shape
    1 for the ignored dimension."""
    for i in xrange(3):
        assert pool_shape[i] <= image_shape[i], ("pool shape should be less"
            " than or equal to image shape, {:d} > {:d} for "
            "pool_shape: {:s}, image_shape:{:s}").format(pool_shape[i],
                image_shape[i], pool_shape, image_shape)
    output_shape = [((image_shape[i] - pool_shape[i]) // pool_stride[i]) + 1 
        for i in xrange(3)]
    output2d_pooled = gpu_alloc_empty(inputs.shape[0], inputs.shape[1],
        output_shape[0], output_shape[1], image_shape[2])
    for z in range(image_shape[2]):
        pooled_slice = dnn_pool(inputs[:,:,:,:,z], ws=pool_shape[0:2], 
            stride=pool_stride[0:2], mode=mode)
        output2d_pooled = T.set_subtensor(output2d_pooled[:,:,:,:,z], pooled_slice)
    
    
    # now 1d-pool over last dimension...
    # could use first or second dimension as input of pool1d..
    # compute maximum y index after first pooling
    output = gpu_alloc_empty(inputs.shape[0], inputs.shape[1],
        output_shape[0], output_shape[1], output_shape[2])
    max_y = output_shape[1]
    for y in range(max_y):
        # the first of the two pooled axes was already pooled in the loop
        # above, so set its pool shape and stride to 1 here
        final_pooled_slice = dnn_pool(output2d_pooled[:,:,:,y,:], 
            ws=(1, pool_shape[2]), 
            stride=(1, pool_stride[2]), mode=mode)
        output = T.set_subtensor(output[:,:,:,y,:], final_pooled_slice)

    return output
    
Author: robintibor, Project: pylearn3dconv, Lines: 34, Source: pool.py
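
As a quick illustration of how dnn_pool3d2d from Example 2 might be called, here is a hypothetical usage sketch (the tensor name, pool sizes, and image_shape values are made up for illustration and are not from the original project; it assumes the definition above plus a GPU-enabled Theano with cuDNN):

    import theano.tensor as T

    ftensor5 = T.TensorType('float32', (False,) * 5)  # (batch, channels, height, width, time)
    video = ftensor5('video')
    # image_shape holds the static sizes of the last three axes (height, width, time),
    # which the helper needs in order to build its per-slice pooling loops.
    pooled = dnn_pool3d2d(video,
                          pool_shape=(2, 2, 2),
                          pool_stride=(2, 2, 2),
                          image_shape=(32, 32, 16),
                          mode='max')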


Example 3: __init__

 def __init__(self,h_low,h_high,method="adascan_mod"):
     kern = build_filters(h_low,h_high)    
     sharedKern = theano.shared(kern,name='sharedKern')
     input = theano.tensor.tensor4(name='input')
     self.conv_fun   = theano.function([input],dnn_conv(input,sharedKern))
     self.down_x = theano.function([input],dnn_pool(input,(2,1),stride=(2,1),mode='average_inc_pad'))
     self.down_y = theano.function([input],dnn_pool(input,(1,2),stride=(1,2),mode='average_inc_pad'))
     self.h_low, self.h_high, self.method = h_low, h_high, method
Author: jsharpna, Project: adascan, Lines: 8, Source: theano_scan.py


Example 4: test_dnn_pool_desc_merge

def test_dnn_pool_desc_merge():
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)

    x = theano.tensor.ftensor4("x")
    y = dnn.dnn_pool(x, (2, 2))
    z = dnn.dnn_pool(x, (2, 2))
    f = theano.function([x], [y, z])
    descs = [n for n in f.maker.fgraph.apply_nodes if isinstance(n.op, dnn.GpuDnnPoolDesc)]
    assert len(descs) == 1, f.maker.fgraph
Author: huamichaelchen, Project: Theano, Lines: 10, Source: test_dnn.py


Example 5: model

def model(X,
    h2_u, h3_u,
    h2_s, h3_s,
    w, w2, g2, b2, w3, g3, b3, wy
    ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
Author: 10sun, Project: dcgan_code, Lines: 13, Source: svhn_semisup_analysis.py


Example 6: pool2d

def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='max')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='max')
    elif pool_mode == 'avg':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='average_exc_pad')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='average_exc_pad')
        
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))
    
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Author: kundajelab, Project: keras, Lines: 50, Source: theano_backend.py


Example 7: apply

    def apply(self, input_):
        """Apply the pooling (subsampling) transformation.
        """
        if self.pooling_size == (1, 1, 1):
            return input_

        # Pooling on last two dimensions
        input_ = input_.reshape((input_.shape[0], input_.shape[1] * input_.shape[2], input_.shape[3], input_.shape[4]))
        p = dnn_pool(img=input_, ws=tuple(self.pooling_size[1:]), stride=tuple(self.step[1:]))
        p = p.reshape((p.shape[0], input_.shape[1], input_.shape[2], p.shape[2], p.shape[3]))
        # Pooling on first dimension
        p = p.reshape((p.shape[0], p.shape[1], p.shape[2], p.shape[3] * p.shape[4]))
        output = dnn_pool(img=p, ws=(self.pooling_size[0], 1), stride=(self.step[0], 1))
        output = output.reshape((output.shape[0], output.shape[1], output.shape[2], p.shape[3], p.shape[4]))
        return output
Author: ablavatski, Project: tsa-rnn, Lines: 15, Source: conv3d.py


Example 8: spp_predict

def spp_predict(fmaps, pyramid):
    """ From input confidence maps, perform "SPP" prediction across a scale pyramid and using
        spatial pruning of labels and confidences.

    Arguments:
        fmaps
            theano symbolic 4D tensor with shape (nb_images, nb_labels, nb_rows, nb_cols)
        pyramid
            python list of average pooling kernel sizes, e.g. [3, 5].
    Returns:
        symbolic (nb_images, nb_labels) tensor of spatially pooled multi-scale predictions.
    """
    # Step 1: average pooling of the confidences across multiple scales, then average pooling
    # of that using spatial information to get multi-scale spatial confidences.
    pooled_maps = fmaps
    nb_images, nb_labels, nb_rows, nb_cols = fmaps.shape
    
    for ws in pyramid:
        pooled_maps += resize(
            dnn_pool(fmaps, (ws, ws), (1, 1), mode='average'),
            (nb_rows, nb_cols)
        )
    pooled_maps /= len(pyramid) + 1
    # Step 2: spatial max-pooling across labels.
    label_conf, label_map = T.max_and_argmax(pooled_maps, axis=1, keepdims=True)
    bcast_labels = T.addbroadcast(T.arange(nb_labels).reshape([1, nb_labels, 1, 1]), 0, 2, 3)
    label_mask = T.eq(bcast_labels, label_map)

    return T.mean(label_mask * label_conf, axis=[2,3])
Author: alexisVallet, Project: cnn-anime, Lines: 29, Source: spp_prediction.py


Example 9: apply

 def apply(self, input):
     """
     Apply this discriminator module to the given input. This produces a
     collection of filter responses for feedforward and a spatial grid of
     discriminator outputs.
     """
     bm = int((self.filt_dim - 1) / 2) # use "same" mode convolutions
     ss = self.ds_stride               # stride for "learned downsampling"
     # apply first conv layer
     h1 = dnn_conv(input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = lrelu(h1)
     # apply second conv layer (may include downsampling)
     if self.use_pooling:
         h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
         h2 = dnn_pool(h2, (ss,ss), stride=(ss, ss), mode='max', pad=(0, 0))
     else:
         h2 = dnn_conv(h1, self.w2, subsample=(ss, ss), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
     
     # apply discriminator layer
     y = dnn_conv(h2, self.wd, subsample=(1, 1), border_mode=(bm, bm))
     y = sigmoid(T.flatten(y, 2)) # flatten to (batch_size, num_preds)
     return h2, y
Author: ml-lab, Project: MatryoshkaNetworks, Lines: 30, Source: MatryoshkaModules.py


Example 10: compute_output

    def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        pool_size = network.find_hyperparameter(["pool_size"])
        dim = len(pool_size)
        # works for sizes 2 and 3
        assert dim in [2, 3]
        stride = network.find_hyperparameter(["pool_stride",
                                              "stride"],
                                             None)
        if stride is None:
            stride = pool_size
        pad = network.find_hyperparameter(["pool_pad", "pad"], (0,) * dim)
        assert dim == len(stride) == len(pad)
        if dim == 2:
            pool_axes = (2, 3)
        elif dim == 3:
            pool_axes = (2, 3, 4)
        out_shape = downsample.pool_output_shape(
            input_shape=in_vw.shape,
            axes=pool_axes,
            pool_shape=pool_size,
            strides=stride,
            pads=pad)
        out_var = dnn.dnn_pool(img=in_vw.variable,
                               ws=pool_size,
                               stride=stride,
                               pad=pad,
                               mode=mode)

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        )
Author: rewonc, Project: treeano, Lines: 35, Source: dnn.py


Example 11: __init__

    def __init__(self, inputs=None, size=(1, 1), stride=None, pad=(0, 0), mode='max', ignore_border=True):
        """
        Parameters
        ----------
        inputs : tuple(shape, `Theano.TensorType`)
            tuple(shape, `Theano.TensorType`) or None describing the input to use for this layer.
            `shape` will be a monad tuple representing known sizes for each dimension in the `Theano.TensorType`.
            If 4D images as input, expect formatted as (batch_size, #channels, rows, cols).
        size : tuple(int) or int
            Downsample factor over (rows, columns). If it is an int, it will be the same size for rows and cols.
        stride : tuple(int) or int
            Stride size (step size), which is the number of shifts over rows/cols to get the
            next pool region. If it is an int, it will be the same size for rows and cols.
        pad : tuple(int) or int
            (pad_h, pad_w), pad zeros to extend beyond four borders
            of the images, pad_h is the size of the top and bottom margins,
            and pad_w is the size of the left and right margins. If it is an int, it will be the same
            size for rows and cols.
        mode : 'max', 'sum', 'average_inc_pad', 'average_exc_pad'
            Operation executed on each window. `max` and `sum` always exclude
            the padding in the computation. `average` gives you the choice to
            include or exclude it.
        ignore_border : bool
            If `size` doesn't divide the input `shape`, do we include an extra row/col of
            partial downsampling (False) or ignore it (True). When True, (5,5) input with size=(2,2)
            will generate a (2,2) output. (3,3) otherwise.
        """
        super(Pool2D, self).__init__(inputs=inputs, size=size, stride=stride, pad=pad)
        input_shape, self.input = self.inputs[0]
        if isinstance(size, int):
            size = (size, ) * 2
        if stride is None:
            stride = size
        if isinstance(stride, int):
            stride = (stride, ) * 2
        if isinstance(pad, int):
            pad = (pad, ) * 2

        assert len(size) == len(stride) == len(pad), "Size, stride, and pad must have the same number of dimensions."

        self.output_size = tuple(_pool_out_size(imgshape=input_shape,
                                                ds=size,
                                                st=stride,
                                                ignore_border=ignore_border,
                                                padding=pad))

        cudnn_modes = ['max', 'average_inc_pad', 'average_exc_pad']
        if has_cudnn and mode in cudnn_modes and ignore_border and self.input.ndim == 4:
            self.output = dnn_pool(img=self.input,
                                   ws=size,
                                   stride=stride,
                                   mode=mode,
                                   pad=pad)
        else:
            self.output = max_pool_2d(input=self.input,
                                      ds=size,
                                      st=stride,
                                      padding=pad,
                                      mode=mode,
                                      ignore_border=ignore_border)
Author: alaminsjam, Project: OpenDeep, Lines: 60, Source: pooling.py


Example 12: apply

    def apply(self, input_):
        """Apply the pooling (subsampling) transform

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            3D tensor with axes batch size, sequence, features

        Returns
        -------
        output: :class:`~tensor.TensorVariable`
            3D tensor with axes batch size, sequence, features
        """
        shuffled = input_.dimshuffle(0, 2, 1, 'x')

        # batch_size, num_filters, x_map, 1
        if self.step is None:
            st = (1,1)
        else:
            st = (self.step, 1)
        #output = max_pool_2d(shuffled, (self.pooling_length, 1), st=st)
        output = dnn_pool(shuffled, (self.pooling_length, 1), stride=st)

        sequence_out = output[:, :, :, 0].dimshuffle(0, 2, 1)

        return sequence_out
Author: caomw, Project: MLFun, Lines: 26, Source: Conv1D.py


Example 13: get_convpool

    def get_convpool(self, img, kerns, conv_b, subsample, border_mode, pooling, ws=None, stride=None, normalizing=False):

        conv_out = dnn.dnn_conv(
                img=img,
                kerns=kerns,
                subsample=subsample,
                border_mode=border_mode
                )

        conv_out += conv_b.dimshuffle('x',0,'x','x')
        conv_out = T.maximum(conv_out, 0)

        if pooling:
            pool_out = dnn.dnn_pool(
                    conv_out,
                    ws=ws,
                    stride=stride
                    )
        else:
            pool_out = conv_out

        if normalizing:
            norm_out = CrossChannelNormalization()(pool_out)
        else:
            norm_out = pool_out
        return norm_out
Author: jazzsaxmafia, Project: video_recognition, Lines: 26, Source: convnet.py


Example 14: pool2d

def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:  # CPU version, supported by Theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Author: trungnt13, Project: odin_old, Lines: 34, Source: theano_backend.py


Example 15: __init__

    def __init__(self, input, image_shape, filter_shape, convstride, padsize,
                 poolsize, poolstride,poolpad, W, b, lrn=False,
                 lib_conv='cudnn',
                 ):
        self.filter_size = filter_shape
        self.convstride = convstride
        self.padsize = padsize


        self.channel = image_shape[0]
        self.lrn = lrn
        self.lib_conv = lib_conv

        self.filter_shape = np.asarray(filter_shape)
        self.image_shape = np.asarray(image_shape)

        
        self.W = W#Weight(self.filter_shape)
        self.b = b#Weight(self.filter_shape[3])#, bias_init, std=0)
        
        input_shuffled = input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
            # in01out to outin01
            # print image_shape_shuffled
            # print filter_shape_shuffled

        W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        conv_out = dnn.dnn_conv(img=input_shuffled,
                                kerns=W_shuffled,
                                subsample=(convstride, convstride),
                                border_mode=padsize,
                                )
        conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')        
        
        # ReLu
        self.output = T.maximum(conv_out, 0)
        
        # Pool
        self.poolsize = poolsize
        self.poolstride = poolstride 
        self.poolpad = poolpad      
        
        if self.poolsize != 1:
            self.output = dnn.dnn_pool(self.output,
                                       ws=(poolsize, poolsize),
                                       stride=(poolstride, poolstride),
                                       mode='max', pad=(poolpad, poolpad))

        self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b
        
        # LRN
        if self.lrn:
            self.lrn_func = CrossChannelNormalization()
            # lrn_input = gpu_contiguous(self.output)
            self.output = self.lrn_func(self.output)
               
        self.params = [self.W.val, self.b.val]
        self.weight_type = ['W', 'b'] 
        print "conv ({}) layer with shape_in: {}".format(lib_conv,
                                                         str(image_shape))
Author: hma02, Project: platoon, Lines: 59, Source: googlenet.py


Example 16: apply

 def apply(self, input_):
     stride = self.stride
     if not stride:
         stride = self.pooling_size
     inter = input_**2
     inter = dnn_pool(inter, ws=self.pooling_size, stride=stride,
                      pad=self.pad, mode='average_inc_pad')
     return T.sqrt(inter)
Author: AtousaTorabi, Project: cuboid, Lines: 8, Source: __init__.py


Example 17: symb_forward

 def symb_forward(self, symb_input):
     return _dnn.dnn_pool(
         img=symb_input,
         ws=(self.k_w, self.k_h),
         stride=(self.d_w, self.d_h),
         mode='max',
         pad=(self.pad_w, self.pad_h)
     )
Author: VisualComputingInstitute, Project: Beacon8, Lines: 8, Source: SpatialMaxPoolingCUDNN.py


Example 18: process

 def process(self, input, tparams, BNparams):
     b, f, h0, w0 = input.shape
     result = []
     for h, w in self.pymamid:
         win_h = T.ceil(h0 / h).astype('int32')
         win_w = T.ceil(w0 / w).astype('int32')
         str_h = T.floor(h0 / h).astype('int32')
         str_w = T.floor(w0 / w).astype('int32')
         result.append(dnn_pool(
             img=input, ws=(win_h, win_w), mode=self.mode,
             stride=(str_h, str_w), pad=(0, 0)).reshape([b, -1]))
     return T.concatenate(result, axis=1)
Author: wufangjie, Project: dnn, Lines: 12, Source: layers.py


Example 19: fprop

    def fprop(self, x):
        pooled_out = cuDNN.dnn_pool(
            img=x,
            ws=(self.poolSize, self.poolSize),
            stride=(self.poolStride,self.poolStride),
            pad = (self.pad, self.pad),
            mode=self.mode
            )
        pooled_out = pooled_out if self.actFunc is None else self.actFunc(pooled_out)
        return pooled_out
    
# End PoolLayer
#-------------------------------------------------------------------------------
Author: ybzhou, Project: Gemini, Lines: 13, Source: pool_layer.py


Example 20: get_output_for

    def get_output_for(self, input, **kwargs):
        input_size = tuple(symb if fixed is None else fixed
                           for fixed, symb
                           in zip(self.input_shape[2:], input.shape[2:]))
        pool_list = []
        for pool_dim in self.pool_dims:
            win_size = tuple((i + pool_dim - 1) // pool_dim
                             for i in input_size)
            str_size = tuple(i // pool_dim for i in input_size)

            pool = dnn.dnn_pool(input, win_size, str_size, self.mode, (0, 0))
            pool = pool.flatten(3)
            pool_list.append(pool)

        return theano.tensor.concatenate(pool_list, axis=2)
Author: Lasagne, Project: Lasagne, Lines: 15, Source: dnn.py



Note: The theano.sandbox.cuda.dnn.dnn_pool function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.

