Python dnn.dnn_conv Function Code Examples


This article collects typical usage examples of the Python function theano.sandbox.cuda.dnn.dnn_conv. If you are wondering how dnn_conv is used in practice, what its arguments look like, or simply want real-world examples of dnn_conv calls, the curated snippets below should help.



A total of 20 dnn_conv code examples are shown below, ordered by popularity by default.
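Before the numbered examples, here is a minimal sketch of a dnn_conv call, assuming a CUDA-enabled Theano installation with cuDNN (dnn_conv belongs to the legacy theano.sandbox.cuda backend and runs only on the GPU). The tensor shapes and the random weight initialization are illustrative assumptions; the keyword arguments img, kerns, subsample and border_mode are the ones used throughout the examples below.

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv

# Symbolic 4D input in Theano's (batch, channels, rows, cols) layout.
X = T.tensor4('X')

# Hypothetical weights: 64 filters over 3 input channels, 5x5 each
# (shapes chosen purely for illustration).
W = theano.shared(np.random.randn(64, 3, 5, 5).astype('float32'), name='W')

# Stride-2 convolution with 2 pixels of zero padding on each border,
# the subsample=(2, 2), border_mode=(2, 2) pattern seen in the examples below.
out = dnn_conv(img=X, kerns=W, subsample=(2, 2), border_mode=(2, 2))

# Compilation requires Theano to be configured with device=gpu and cuDNN available.
f = theano.function([X], out)

With a 64x64 input, this configuration yields a 32x32 output ((64 + 2*2 - 5) // 2 + 1 = 32), so each such layer halves the spatial resolution; this is why the DCGAN-style discriminators below stack several stride-2 dnn_conv layers.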

Example 1: apply

 def apply(self, input):
     """
     Apply this discriminator module to the given input. This produces a
     collection of filter responses for feedforward and a spatial grid of
     discriminator outputs.
     """
     bm = int((self.filt_dim - 1) / 2) # use "same" mode convolutions
     ss = self.ds_stride               # stride for "learned downsampling"
     # apply first conv layer
     h1 = dnn_conv(input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
     if self.apply_bn_1:
         h1 = batchnorm(h1, g=self.g1, b=self.b1)
     h1 = lrelu(h1)
     # apply second conv layer (may include downsampling)
     if self.use_pooling:
         h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
         h2 = dnn_pool(h2, (ss,ss), stride=(ss, ss), mode='max', pad=(0, 0))
     else:
         h2 = dnn_conv(h1, self.w2, subsample=(ss, ss), border_mode=(bm, bm))
         if self.apply_bn_2:
             h2 = batchnorm(h2, g=self.g2, b=self.b2)
         h2 = lrelu(h2)
     
     # apply discriminator layer
     y = dnn_conv(h2, self.wd, subsample=(1, 1), border_mode=(bm, bm))
     y = sigmoid(T.flatten(y, 2)) # flatten to (batch_size, num_preds)
     return h2, y
Author: ml-lab, Project: MatryoshkaNetworks, Lines: 30, Source: MatryoshkaModules.py
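A side note on the padding arithmetic used above: for a stride-1 convolution with an odd filter width k, a border of bm = (k - 1) / 2 pixels keeps the output the same size as the input (out = in + 2*bm - k + 1 = in), which is exactly the "same"-mode behavior the bm computation aims for; a 5x5 filter, for instance, gives border_mode=(2, 2).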


Example 2: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert(self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample,
                                          image_shape=self.input_shape,
                                          filter_shape=self.W_shape)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Author: rahulmohan, Project: keras, Lines: 32, Source: convolutional.py
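Note how the non-cuDNN branch in this example emulates "same" padding: a "full" convolution produces an output of size in + k - 1 along each spatial axis, and cropping (k - 1) // 2 pixels from each border brings it back to the input size. This only works for unit strides, which is why the "same" branch asserts subsample == (1, 1).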


Example 3: discrim

def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h3 = T.flatten(h3, 2)
    y = -softplus(T.dot(h3, wy)+by)
    return y
Author: taesupkim, Project: dcgan_code, Lines: 7, Source: train_rbm_cifar10_24x24.py


Example 4: encoder

def encoder( s, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5 ):
    h1 = lrelu( dnn_conv( s, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    z = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    return T.flatten( z, 2 )
Author: igotyooo, Project: dcgan, Lines: 7, Source: train_ced_mse_d_dd.py


Example 5: discrim

def discrim( t, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5 ):
    h1 = lrelu( dnn_conv( t, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    yd = sigmoid( T.dot( T.flatten( h4, 2 ), w5 ) )
    return yd
Author: igotyooo, Project: dcgan, Lines: 7, Source: train_ced_mse_d_dd.py


Example 6: discrim

def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
Author: udibr, Project: dcgan_code, Lines: 31, Source: train_uncond_dcgan.py


Example 7: discrim

def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, by):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = -relu(T.dot(h2, wy)+by)
    return y
Author: taesupkim, Project: dcgan_code, Lines: 7, Source: train_rbm_cifar10_16x16.py


Example 8: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        newshape = (X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4])
        Y = theano.tensor.reshape(X, newshape) #collapse num_samples and num_timesteps
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                border_mode=border_mode, subsample=self.subsample)

            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x, shift_y:Y.shape[3] + shift_y]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
        return theano.tensor.reshape(output, newshape)
Author: Michlong, Project: keras-extra, Lines: 33, Source: extra.py


Example 9: get_output

    def get_output(self, train):
        X = self.get_input(train)
        border_mode = self.border_mode
        if dnn.dnn_available() and theano.config.device[:3] == 'gpu':
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Author: chuckgu, Project: Alphabeta, Lines: 29, Source: Convolutional_Layer.py


Example 10: discrim

 def discrim(X, w, w2, w3, wy):
     h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
     h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))))
     h2 = T.flatten(h2, 2)
     h3 = lrelu(batchnorm(T.dot(h2, w3)))
     y = sigmoid(T.dot(h3, wy))
     return y
Author: mehdidc, Project: dcgan, Lines: 7, Source: job.py


Example 11: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                pad_x = (self.filter_length - self.subsample_length) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, 0))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.filter_length - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
        return output
Author: Mofef, Project: keras, Lines: 32, Source: convolutional.py


Example 12: domain_discrim

def domain_discrim( st, w1, w2, g2, b2, w3, g3, b3, w4, g4, b4, w5, g5, b5, w6 ):
    h1 = lrelu( dnn_conv( st, w1, subsample=( 2, 2 ), border_mode = ( 2, 2 ) ) )
    h2 = lrelu( batchnorm( dnn_conv( h1, w2, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g2, b = b2 ) )
    h3 = lrelu( batchnorm( dnn_conv( h2, w3, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g3, b = b3 ) )
    h4 = lrelu( batchnorm( dnn_conv( h3, w4, subsample = ( 2, 2 ), border_mode = ( 2, 2 ) ), g = g4, b = b4 ) )
    h5 = lrelu( batchnorm( dnn_conv( h4, w5, subsample = ( 1, 1 ), border_mode = ( 0, 0 ) ), g = g5, b = b5 ) )
    ydd = sigmoid( T.dot( T.flatten( h5, 2 ), w6 ) )
    return ydd
Author: igotyooo, Project: dcgan, Lines: 8, Source: train_ced_mse_d_dd.py


Example 13: discrim

def discrim(X, w, w2, g2, b2, w3, g3, b3, wy, wa):
    h0 = dropout(relu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))), p=0.5)
    h1 = dropout(relu(batchnorm(dnn_conv(h0, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2)), p=0.5)
    h2 = dropout(relu(batchnorm(dnn_conv(h1, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3)), p=0.5)
    h2 = T.flatten(h2, 2)
    y = square(T.dot(h2, wy))
    y = T.dot(T.log(1+y), T.exp(wa))
    return y
Author: taesupkim, Project: dcgan_code, Lines: 8, Source: train_pot_cifar10_16x16.py


Example 14: feature_function

 def feature_function(input_data, is_train=True):
     h0 = relu(batchnorm(X=dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2)), g=bn_w0, b=bn_b0))
     h1 = relu(batchnorm(X=dnn_conv(h0,         conv_w1, subsample=(2, 2), border_mode=(2, 2)), g=bn_w1, b=bn_b1))
     h2 = relu(batchnorm(X=dnn_conv(h1,         conv_w2, subsample=(2, 2), border_mode=(2, 2)), g=bn_w2, b=bn_b2))
     h3 = relu(batchnorm(X=dnn_conv(h2,         conv_w3, subsample=(2, 2), border_mode=(2, 2)), g=bn_w3, b=bn_b3))
     h3 = T.flatten(h3, 2)
     f  = tanh(T.dot(h3, linear_w4)+linear_b4)
     return f
Author: taesupkim, Project: dcgan_code, Lines: 8, Source: energy_rbm_svhn_0.py


Example 15: discrim

def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    h4 = T.flatten(h4, 2)
    y = sigmoid(T.dot(h4, wy))
    return y
Author: igotyooo, Project: dcgan, Lines: 8, Source: train_uncond_dcgan.py


Example 16: feature_function

 def feature_function(input_data, is_train=True):
     # layer 0 (conv)
     h0 = relu(dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))+conv_b0.dimshuffle('x', 0, 'x', 'x'))
     # layer 1 (conv)
     h1 = relu(dnn_conv(        h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))+conv_b1.dimshuffle('x', 0, 'x', 'x'))
     # layer 2 (conv)
     h2 = dnn_conv(        h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))+conv_b2.dimshuffle('x', 0, 'x', 'x')
     feature = T.flatten(h2, 2)
     return feature
Author: taesupkim, Project: dcgan_code, Lines: 9, Source: energy_rbm_cifar10_0.py


Example 17: conv2d

def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(strides == (1, 1))
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            assert(strides == (1, 1))
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x, kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides)
        if border_mode == 'same':
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:x.shape[2] + shift_x,
                                shift_y:x.shape[3] + shift_y]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
Author: sfwlily, Project: keras, Lines: 53, Source: theano_backend.py


Example 18: discrim

def discrim(X, Y, w, w2, w3, wy):
    yb = Y.dimshuffle(0, 1, 'x', 'x')
    X = conv_cond_concat(X, yb)
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h = conv_cond_concat(h, yb)
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))))
    h2 = T.flatten(h2, 2)
    h2 = T.concatenate([h2, Y], axis=1)
    h3 = lrelu(batchnorm(T.dot(h2, w3)))
    h3 = T.concatenate([h3, Y], axis=1)
    y = sigmoid(T.dot(h3, wy))
    return y
Author: aaajiao, Project: typeface, Lines: 12, Source: generate.py


Example 19: feature_function

 def feature_function(input_data, is_train=True):
     # layer 0 (conv)
     h0 = relu(dnn_conv(input_data, conv_w0, subsample=(2, 2), border_mode=(2, 2))+conv_b0.dimshuffle('x', 0, 'x', 'x'))
     # layer 1 (conv)
     h1 = relu(dnn_conv(        h0, conv_w1, subsample=(2, 2), border_mode=(2, 2))+conv_b1.dimshuffle('x', 0, 'x', 'x'))
     # layer 2 (conv)
     h2 = relu(dnn_conv(        h1, conv_w2, subsample=(2, 2), border_mode=(2, 2))+conv_b2.dimshuffle('x', 0, 'x', 'x'))
     # layer 3 (conv)
     h3 = relu(dnn_conv(        h2, conv_w3, subsample=(2, 2), border_mode=(2, 2))+conv_b3.dimshuffle('x', 0, 'x', 'x'))
     h3 = T.flatten(h3, 2)
     f  = tanh(T.dot(h3, linear_w4)+linear_b4)
     return f
Author: taesupkim, Project: dcgan_code, Lines: 12, Source: energy_rbm_face_1.py


Example 20: model

def model(X,
    h2_u, h3_u,
    h2_s, h3_s,
    w, w2, g2, b2, w3, g3, b3, wy
    ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
Author: 10sun, Project: dcgan_code, Lines: 13, Source: svhn_semisup_analysis.py



Note: The theano.sandbox.cuda.dnn.dnn_conv examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not republish this article without permission.

