Python tensor.dmatrix function code examples


This article collects typical usage examples of Python's theano.tensor.dmatrix function. If you are unsure what dmatrix does, how to call it, or what real code that uses it looks like, the curated examples below should help.



The following presents 20 code examples of the dmatrix function, ordered by popularity.
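Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the variable names a, b and f are purely illustrative) of what T.dmatrix provides: it declares a symbolic float64 matrix that carries no data of its own, can be combined into larger symbolic expressions, and is turned into concrete computation by compiling the expression with theano.function.

import numpy as np
import theano
import theano.tensor as T

# Declare two symbolic float64 matrices; these are placeholders, not arrays.
a = T.dmatrix('a')
b = T.dmatrix('b')

# Build a symbolic expression and compile it into a callable function.
f = theano.function([a, b], T.dot(a, b) + a)

# Call the compiled function with concrete NumPy arrays of compatible shapes.
print(f(np.ones((2, 2)), np.eye(2)))

The same pattern recurs throughout the examples below: declare symbolic inputs with dmatrix, build an expression, then compile it with theano.function.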

Example 1: get_hidden_layers

def get_hidden_layers(dbn, layers):
    print "... getting hidden layers"
    test_data, test_label = get_test_set()
    index = T.lscalar()
    hidden_features = []
    total_layers = len(layers)

    w = T.dmatrix("w")
    t = T.dmatrix("t")
    b = T.vector("b")
    z = T.dot(w,t)
    # function for testing model
    test_f = theano.function([w,t], z)

    #loop through each layer
    for i in xrange(total_layers):
        weights = layers[i][0]
        bias = layers[i][1]

        if i == 0:
            hidden_features.append( test_f(test_data,weights) )
        else:
            #use previous layer
            prev_layer = hidden_features[i-1]
            hidden_features.append( test_f(prev_layer,weights) )

    # apply sigmoid
    with open('hidden.pkl', 'w') as f:
        cPickle.dump(hidden_features, f)
Developer: Delvison, Project: Digit-Recognizer, Lines of code: 29, Source file: pretrain.py


Example 2: test_mixin_composition

def test_mixin_composition():
    # Check composed expressions as parameters
    a = theano.shared(0.0)
    b = theano.shared(-1.0)
    mu = a + b - 1.0
    sigma = T.abs_(a * b)
    p = Normal(mu=mu, sigma=sigma)
    assert a in p.parameters_
    assert b in p.parameters_

    # Compose parameters with observed variables
    a = theano.shared(1.0)
    b = theano.shared(0.0)
    y = T.dmatrix(name="y")
    p = Normal(mu=a * y + b)
    assert len(p.parameters_) == 3
    assert a in p.parameters_
    assert b in p.parameters_
    assert p.sigma in p.parameters_
    assert p.mu not in p.parameters_
    assert len(p.observeds_) == 1
    assert y in p.observeds_

    # Check signatures
    data_X = np.random.rand(10, 1)
    data_y = np.random.rand(10, 1)
    p.pdf(X=data_X, y=data_y)
    p.cdf(X=data_X, y=data_y)
    p.rvs(10, y=data_y)

    # Check error
    a = theano.shared(1.0)
    b = theano.shared(0.0)
    y = T.dmatrix()  # y must be named
    assert_raises(ValueError, Normal, mu=a * y + b)
Developer: cranmer, Project: carl, Lines of code: 35, Source file: test_base.py


Example 3: neural_net

    def neural_net(
            x=T.dmatrix(),    #our points, one point per row
            y=T.dmatrix(),    #our targets
            w=T.dmatrix(),    #first layer weights
            b=T.dvector(),    #first layer bias
            v=T.dmatrix(),    #second layer weights
            c=T.dvector(),    #second layer bias
            step=T.dscalar(), #step size for gradient descent
            l2_coef=T.dscalar() #l2 regularization amount
            ):
        """Idea A:
        """
        hid = T.tanh(T.dot(x, w) + b)
        pred = T.dot(hid, v) + c
        sse = T.sum((pred - y) * (pred - y))
        w_l2 = T.sum(T.sum(w*w))
        v_l2 = T.sum(T.sum(v*v))
        loss = sse + l2_coef * (w_l2 + v_l2)

        def symbolic_params(cls):
            return [cls.w, cls.b, cls.v, cls.c]

        def update(cls, x, y, **kwargs):
            params = cls.symbolic_params()
            gp = T.grad(cls.loss, params)
            return [], [In(p, update=p - cls.step * g) for p,g in zip(params, gp)]

        def predict(cls, x, **kwargs):
            return cls.pred, []

        return locals()
Developer: olivierverdier, Project: Theano, Lines of code: 31, Source file: symbolic_module.py


Example 4: createMLP

def createMLP(layers, s):
    l_in = lasagne.layers.InputLayer(shape=(None, s))
    prev_layer = l_in
    Ws = []
    for layer in layers:
        enc = lasagne.layers.DenseLayer(prev_layer, num_units=layer, nonlinearity=rectify, W=init.Uniform(0.01))
        Ws += [enc.W]
        drop = lasagne.layers.DropoutLayer(enc, p=0.5)
        prev_layer = drop
    idx = 1
    # creating mask
    mask = lasagne.layers.InputLayer(shape=(None, layers[-1]))
    prev_layer = lasagne.layers.ElemwiseMergeLayer([prev_layer, mask], merge_function=T.mul)
    for layer in layers[-2::-1]:
        print layer
        dec = lasagne.layers.DenseLayer(prev_layer, num_units=layer, nonlinearity=rectify, W=Ws[-idx].T)
        idx += 1
        drop = lasagne.layers.DropoutLayer(dec, p=0.0)
        prev_layer = drop
    model = lasagne.layers.DenseLayer(prev_layer, num_units=s, nonlinearity=identity, W=Ws[0].T)

    x_sym = T.dmatrix()
    mask_sym = T.dmatrix()
    all_params = lasagne.layers.get_all_params(model)
    output = lasagne.layers.get_output(model, inputs={l_in: x_sym, mask: mask_sym})
    loss_eval = lasagne.objectives.squared_error(output, x_sym).sum()
    loss_eval /= (2.*batch_size)
    updates = lasagne.updates.adam(loss_eval, all_params)

    return l_in, mask, model, theano.function([x_sym, mask_sym], loss_eval, updates=updates)
Developer: Enny1991, Project: MasterThesis, Lines of code: 30, Source file: multi_connected_AE.py


Example 5: test_infer_shape

    def test_infer_shape(self):
        admat = dmatrix()
        bdmat = dmatrix()
        admat_val = numpy.random.rand(3, 4)
        bdmat_val = numpy.random.rand(3, 4)
        self._compile_and_check([admat, bdmat], [SoftmaxGrad()(admat, bdmat)],
                                [admat_val, bdmat_val], SoftmaxGrad)
Developer: srifai, Project: Theano, Lines of code: 7, Source file: test_nnet.py


Example 6: LQLEP_wBarrier

def LQLEP_wBarrier( LQLEP    = Th.dscalar(), ldet = Th.dscalar(), v1 = Th.dvector(), 
                    N_spike  = Th.dscalar(), ImM  = Th.dmatrix(),  U = Th.dmatrix(),
                    V2       = Th.dvector(),    u = Th.dvector(),  C = Th.dmatrix(),
                    **other):
    '''
    The actual Linear-Quadratic-Exponential-Poisson log-likelihood, 
    as a function of theta and M, 
    with a barrier on the log-det term and a prior.
    '''
    sq_nonlinearity = V2**2.*Th.sum( Th.dot(U,C)*U, axis=[1])  #Th.sum(U**2,axis=[1])
    nonlinearity = V2 * Th.sqrt( Th.sum( Th.dot(U,C)*U, axis=[1])) #Th.sum(U**2,axis=[1]) )
    if other.has_key('uc'):
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 10. * Th.sum( (other['uc'][2:]+other['uc'][:-2]-2*other['uc'][1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
#                     + 100. * Th.sum( v1 )
    #                 + 0.0001*Th.sum( V2**2 )
    else:
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
#                     + 100. * Th.sum( v1 )
    #                 + 0.0001*Th.sum( V2**2 )
    eigsImM,barrier = eig( ImM )
    barrier   = 1-(Th.sum(Th.log(eigsImM))>-250) * \
                  (Th.min(eigsImM)>0) * (Th.max(4*sq_nonlinearity)<1)
    other.update(locals())
    return named( **other )
Developer: kolia, Project: subunits, Lines of code: 31, Source file: QuadPoiss.py


Example 7: __init__

    def __init__(self,N,Nsub,NRGC,prior=1):
        self.N     = N
        self.Nsub  = Nsub
        self.NRGC  = NRGC
        U   = Th.dmatrix()                   # SYMBOLIC variables       #
        V1  = Th.dvector()                                              #
        V2  = Th.dvector()                                              #
        STA = Th.dvector()                                              #
        STC = Th.dmatrix()                                              #
        theta = Th.dot( U.T , V1 )                                      #
        UV1U  = Th.dot( U , theta )                                     #
        UV1V2U= Th.dot( V1 * U.T , (V2 * U.T).T )                       #
        posterior  = -0.5 * Th.sum( V1 * V2 * U.T*U.T ) \
                     -0.25* Th.sum( UV1V2U.T * UV1V2U ) \
                     -0.5 * Th.sum( UV1U * UV1U * UV1U *V2 *V2 * V1 ) \
                     -0.5 * Th.sum( UV1U * UV1U * V2 * V1 ) \
                     -0.5 * Th.sum( theta * theta ) \
                     + Th.dot( theta.T , STA ) \
                     + Th.sum( Th.dot( V1* V2*U.T , U ) \
                     * (STC + STA.T*STA) )
        dpost_dU  = Th.grad( cost           = posterior ,               #
                             wrt            = U         )               #
        dpost_dV1 = Th.grad( cost           = posterior ,               #
                             wrt            = V1        )               #
        dpost_dV2 = Th.grad( cost           = posterior ,               #
                             wrt            = V2        )               #
#        self.posterior  = function( [U,V2,V1,STA,STC],  UV1V2U)      #
        self.posterior  = function( [U,V2,V1,STA,STC],  posterior)      #
        self.dpost_dU   = function( [U,V2,V1,STA,STC], dpost_dU  )      #
        self.dpost_dV1  = function( [U,V2,V1,STA,STC], dpost_dV1 )      #
        self.dpost_dV2  = function( [U,V2,V1,STA,STC], dpost_dV2 )      #
Developer: kolia, Project: subunits, Lines of code: 31, Source file: LQuadLExP_taylor.py


Example 8: theano_setup

    def theano_setup(self):
    
        W = T.dmatrix('W')
        b = T.dvector('b')
        c = T.dvector('c')
        x = T.dmatrix('x')
    
        s = T.dot(x, W) + c
        # h = 1 / (1 + T.exp(-s))
        # h = T.nnet.sigmoid(s)
        h = T.tanh(s)
        # r = T.dot(h,W.T) + b
        # r = theano.printing.Print("r=")(2*T.tanh(T.dot(h,W.T) + b))
        ract = T.dot(h,W.T) + b
        r = self.output_scaling_factor * T.tanh(ract)
    
        #g  = function([W,b,c,x], h)
        #f  = function([W,b,c,h], r)
        #fg = function([W,b,c,x], r)
    
        # Another variable to be able to call a function
        # with a noisy x and compare it to a reference x.
        y = T.dmatrix('y')

        all_losses = ((r - y)**2)
        loss = T.sum(all_losses)
        #loss = ((r - y)**2).sum()
        
        self.theano_encode_decode = function([W,b,c,x], r)
        self.theano_all_losses = function([W,b,c,x,y], [all_losses, T.abs_(s), T.abs_(ract)])
        self.theano_gradients = function([W,b,c,x,y], [T.grad(loss, W), T.grad(loss, b), T.grad(loss, c)])
Developer: gyom, Project: cae.py, Lines of code: 31, Source file: dae_theano.py


Example 9: LR

def LR(x=None, y=None, v=None, c=None, l2_coef=None):
    # our points, one point per row
    if x is None:
        x = T.dmatrix()
    # targets , one per row
    if y is None:
        y = T.dmatrix()
    # first layer weights
    if v is None:
        v = T.dmatrix()
    # first layer biases
    if c is None:
        c = T.dvector()

    if l2_coef is None:
        l2_coef = T.dscalar()

    pred = T.dot(x, v) + c
    sse = T.sum((pred - y) * (pred - y))
    mse = sse / T.shape(y)[0]
    v_l2 = T.sum(T.sum(v*v))
    loss = mse + l2_coef * v_l2

    @symbolicmethod
    def params():
        return [v, c]

    return locals()
Developer: Jackwangyang, Project: Theano, Lines of code: 28, Source file: symbolic_module.py


Example 10: make_theano_functions

    def make_theano_functions(self) :
        x  = T.dmatrix('x')
        h1 = T.dot(x, self.w1.T) + self.b1
        a1 = 1. / (1. + T.exp(-h1))
        h2 = T.dot(a1,self.w2.T) + self.b2
        a2 = T.nnet.softmax(h2)
        
        f = theano.function([x], a2)

        y  = T.dmatrix('y')
        loss = T.mean(T.sum(y*-T.log(a2), axis=1))

        gradw1 = T.grad(loss, self.w1)
        gradw2 = T.grad(loss, self.w2)
        gradb1 = T.grad(loss, self.b1)
        gradb2 = T.grad(loss, self.b2)

        gradf = theano.function(
                [x, y],
                [loss, a2],
                updates = [
                    (self.w1, self.w1-self.lr*gradw1),
                    (self.w2, self.w2-self.lr*gradw2),
                    (self.b1, self.b1-self.lr*gradb1),
                    (self.b2, self.b2-self.lr*gradb2)
                    ]
                )

        return f, gradf
Developer: olimastro, Project: ift6266, Lines of code: 29, Source file: mlp.py


Example 11: train

    def train(self, train_set, batch_size=100):
        for i in xrange(len(self.layers) - 1):
            train_data = T.dmatrix('train_data')
            x = T.dmatrix('x')
            rng = numpy.random.RandomState(123)
            theano_rng = RandomStreams(rng.randint(2 ** 10))
            da = dA(
                numpy_rng=rng,
                theano_rng=theano_rng,
                input=x,
                n_visible=self.layers[i],
                n_hidden=self.layers[i+1]
            )
            cost, updates = da.get_cost_updates(
                corruption_level=0.,
                learning_rate=0.4
            )
            train_da = theano.function(
                [train_data],
                cost,
                updates=updates,
                givens={
                    x: train_data
                }
            )

            for epoch in xrange(200):
                train_cost = []
                for index in xrange(len(train_set)/batch_size):
                    train_cost.append(train_da(numpy.asarray(train_set[index * batch_size: (index + 1) * batch_size])))
                print 'Training 1st ae epoch %d, cost ' % epoch, numpy.mean(train_cost)
            train_set = da.get_hidden_values(train_set).eval()
            self.dAs.append(da)
Developer: fwu8, Project: gong-kuang, Lines of code: 33, Source file: mSdA.py


Example 12: NNet

def NNet(x=None, y=None, n_hid_layers=2):
    # our points, one point per row
    if x is None:
        x = T.dmatrix()
    # targets , one per row
    if y is None:
        y = T.dmatrix()
    layers = []
    _x = x
    for i in xrange(n_hid_layers):
        layers.append(Layer(x=_x))
        _x = layers[-1].y
    classif = LR(x=_x)

    @symbolicmethod
    def params():
        rval = classif.params()
        for l in layers:
            rval.extend(l.params())
        print([id(r) for r in rval])
        return rval

    if 0:
        @symbolicmethod
        def update(x, y):
            pp = params()
            gp = T.grad(classif.loss, pp)
            return dict((p, p - 0.01*g) for p, g in zip(pp, gp))

    return locals()
Developer: Ambier, Project: Theano, Lines of code: 30, Source file: symbolic_module.py


Example 13: __init__

    def __init__(self, model, type_model):
        super(LatentTypeWithTuningCurve, self).__init__(model, type_model)

        # Also initialize the tuning curves
        self.mu = self.type_model['mu']
        self.sigma = self.type_model['sigma']

        # Create a basis for the stimulus response
        self.spatial_basis = create_basis(self.type_model['spatial_basis'])
        self.spatial_shape = self.type_model['spatial_shape']
        self.spatial_ndim = len(self.spatial_shape)
        (_,Bx) = self.spatial_basis.shape

        self.temporal_basis = create_basis(self.type_model['temporal_basis'])
        (_,Bt) = self.temporal_basis.shape

        # Save the filter sizes
        self.Bx = Bx
        self.Bt = Bt

        # Initialize interpolated bases
        self.initialize_basis()

        # Initialize RxBx and RxBt matrices for the per-type tuning curves
        self.w_x = T.dmatrix('w_x')
        self.w_t = T.dmatrix('w_t')

        # Create function handles for the stimulus responses
        self.stim_resp_t = T.dot(self.temporal_basis, self.w_t)
        self.stim_resp_x = T.dot(self.spatial_basis, self.w_x)

        # Add the probability of these tuning curves to the log probability
        self.log_p += -0.5/self.sigma**2 *T.sum((self.w_x-self.mu)**2) + \
                      -0.5/self.sigma**2 *T.sum((self.w_t-self.mu)**2)
Developer: remtcs, Project: theano_pyglm, Lines of code: 34, Source file: latent.py


Example 14: test_free_energy

    def test_free_energy(self):
        self.setUpAssociativeRBM()
        rbm = self.rbm
        w = rbm.W.get_value(borrow=True)
        u = rbm.U.get_value(borrow=True)
        v = T.dmatrix("v")
        v2 = T.dmatrix("v2")
        v_bias = rbm.v_bias.eval()
        v_bias2 = rbm.v_bias2.eval()
        h_bias = rbm.h_bias.eval()

        res = rbm.free_energy(v, v2)
        f = theano.function([v, v2], [res])
        theano_res = f(self.x, self.y)

        # Test for case only v1 is present
        n1 = - np.dot(self.x, v_bias)
        n2 = - np.dot(self.y, v_bias2)
        n3 = - np.sum(np.log(1 + np.exp(h_bias + np.dot(self.x, w) + np.dot(self.y, u))))
        np_res = n1 + n2 + n3

        print theano_res
        print np_res

        diff = theano_res == np_res
        self.assertTrue(np.all(diff))
Developer: LeonBai, Project: AssociationLearning, Lines of code: 26, Source file: test_associative_rbm.py


Example 15: theano_sed

def theano_sed():
    """
    Function to create a theano function to compute the euclidian distances efficiently
    Returns:
        theano.compile.function_module.Function: Compiled function

    """

    theano.config.compute_test_value = "ignore"

    # Set symbolic variable as matrix (with the XYZ coords)
    coord_T_x1 = T.dmatrix()
    coord_T_x2 = T.dmatrix()

    # Euclidian distances function
    def squared_euclidean_distances(x_1, x_2):
        sqd = T.sqrt(T.maximum(
            (x_1 ** 2).sum(1).reshape((x_1.shape[0], 1)) +
            (x_2 ** 2).sum(1).reshape((1, x_2.shape[0])) -
            2 * x_1.dot(x_2.T), 0
        ))
        return sqd

    # Compiling function
    f = theano.function([coord_T_x1, coord_T_x2],
                        squared_euclidean_distances(coord_T_x1, coord_T_x2),
                        allow_input_downcast=False)
    return f
Developer: chinasio, Project: gempy, Lines of code: 28, Source file: coKriging.py


Example 16: asho_test

def asho_test():
	import theano.tensor as T
	x = T.dmatrix('x')
	w = T.dmatrix('w')
	y = T.dot(x,w)

	f = function([x,w],y)	
Developer: ehsankddm, Project: thesis, Lines of code: 7, Source file: test.py


Example 17: UV12_input

def UV12_input(V1=Th.dmatrix(),
               STAs=Th.dmatrix(),
               STCs=Th.dtensor3(),
               N_spikes=Th.dvector(),
               **other):
    other.update(locals())
    return named(**other)
Developer: kolia, Project: subunits, Lines of code: 7, Source file: QuadPoiss.py


Example 18: __init__

  def __init__(self,
      np_rng             = np.random.RandomState(1234),
      theano_rng         = None,
      n_in               = 424 * 424 * 3,
      n_out              = 37, # galaxy classes
      hidden_layer_sizes = [500, 500],
      corruption_levels  = [0.1, 0.2]):

    self.np_rng = np_rng
    if not theano_rng: theano_rng = RandomStreams(np_rng.randint(2 ** 30))
    self.n_in = n_in
    self.n_out = n_out
    self.hidden_layer_sizes = hidden_layer_sizes
    self.corruption_levels = corruption_levels

    self.sigmoid_layers = []
    self.da_layers = []
    self.params = []
    self.n_layers = len(hidden_layer_sizes)

    assert self.n_layers > 0, 'must have some hidden layers'

    self.x = T.dmatrix('x')
    self.y = T.dmatrix('y')

    self.build_layers()
Developer: zacstewart, Project: kaggle_galaxy_zoo, Lines of code: 26, Source file: autoencoder.py


Example 19: __init__

    def __init__(self, beta=0.1, n_in=1, n_out=1):
        self.__beta = beta
        self.__x = T.dmatrix('x')
        self.__y = T.dmatrix('y')
        self.__n_in = n_in
        self.__n_out = n_out

        self.__clf_model = _LogisticRegressionModel(d_input=self.__x, 
            n_in=self.__n_in,
            n_out=self.__n_out)
        self.__cost = self.__clf_model.negative_log_likelihood(self.__y)

        # compute the gradient of cost with respect to theta = (W,b)
        self.__g_W = T.grad(cost=self.__cost, wrt=self.__clf_model.W)
        self.__g_b = T.grad(cost=self.__cost, wrt=self.__clf_model.b)

        # start-snippet-3
        # specify how to update the parameters of the model as a list of
        # (variable, update expression) pairs.
        self.__updates = [(self.__clf_model.W, 
            self.__clf_model.W - self.__beta * self.__g_W),
               (self.__clf_model.b, self.__clf_model.b 
                - self.__beta * self.__g_b)]

        self.__train_model = theano.function(
            inputs=[self.__x, self.__y],
            outputs=[self.__cost, self.__clf_model.y_pred, self.__g_W, self.__g_b],
            updates=self.__updates,
        )

        self.__prediction_model = theano.function(
            inputs=[self.__clf_model.input],
            outputs=self.__clf_model.y_pred
        )
Developer: smukherj1, Project: theano-fun, Lines of code: 34, Source file: LogisticRegression.py


Example 20: test_pickle

def test_pickle():
    """Test that a module can be pickled"""
    M = Module()
    M.x = (T.dmatrix())
    M.y = (T.dmatrix())
    a = T.dmatrix()
    M.f = Method([a], a + M.x + M.y)
    M.g = Method([a], a * M.x * M.y)

    mode = get_mode()
    m = M.make(x=numpy.zeros((4,5)), y=numpy.ones((2,3)), mode=mode)

    m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1))

    assert numpy.all(m.x == m_dup.x) and numpy.all(m.y == m_dup.y)

    m_dup.x[0,0] = 3.142
    assert m_dup.f.input_storage[1].data[0,0] == 3.142
    assert m.x[0,0] == 0.0 #ensure that m is not aliased to m_dup

    #check that the unpickled version has the same argument/property aliasing
    assert m_dup.x is m_dup.f.input_storage[1].data
    assert m_dup.y is m_dup.f.input_storage[2].data
    assert m_dup.x is m_dup.g.input_storage[1].data
    assert m_dup.y is m_dup.g.input_storage[2].data
Developer: SinaHonari, Project: Theano, Lines of code: 25, Source file: test_module.py



Note: The theano.tensor.dmatrix examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors. Please refer to each project's license before distributing or reusing the code; do not reproduce without permission.

