• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python scipy.diag函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中scipy.diag函数的典型用法代码示例。如果您正苦于以下问题:Python diag函数的具体用法?Python diag怎么用?Python diag使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了diag函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: orlancz

def orlancz(A, v0, k):
    """
    full orthogonalized Lanczos algorithm (Krylov approximation of a matrix)
        input: 
            A: matrix to approximate
            v0: initial vector (should be in matrix form) 
            k: number of Krylov steps 
        output: 
            V: matrix (large, N*k) containing the orthogonal vectors
            H: matrix (small, k*k) containing the Krylov approximation of A
            
    Author: Vasile Gradinaru, 21.10.2008 (Zuerich)
    """
    print 'FULL ORTHOGONAL LANCZOS METHOD !'
    from numpy import finfo, sqrt
    # threshold for triggering a second reorthogonalization pass:
    # a small multiple of the square root of machine epsilon
    reps = 10*sqrt(finfo(float).eps)
    # first Krylov basis vector: the normalized start vector, kept as a matrix
    V = mat( v0.copy() / norm(v0) )
    alpha =  zeros(k)    # diagonal entries of the tridiagonal H
    beta =  zeros(k+1)   # off-diagonal entries; beta[0] stays unused
    for m in xrange(k):
        #vt = A * V[ :, m]
        vt = multMv( A , V[ :, m])
        # standard three-term Lanczos recurrence
        if m > 0: vt -= beta[m] * V[:, m-1]
        alpha[m] = (V[:, m].H * vt )[0, 0]
        vt -= alpha[m] * V[:, m]
        beta[m+1] = norm(vt)
        # reortogonalization: project vt against the whole basis built so far
        # to counter the loss of orthogonality of plain Lanczos in finite
        # precision; a second pass runs only if the first correction was large
        h1 = multMv(V.H, vt)
        vt -= multMv(V, h1)
        if norm(h1) > reps: vt -= multMv(V, (multMv(V.H,vt)))
        #
        V =  hstack( (V, vt.copy() / beta[m+1] ) ) 
    rbeta = beta[1:-1]    
    # assemble the symmetric tridiagonal Krylov approximation of A
    H = diag(alpha) + diag(rbeta, 1) + diag(rbeta, -1)
    return V,  H
开发者ID:raoulbq,项目名称:Arnoldi_PyCpp,代码行数:35,代码来源:pyarnoldi.py


示例2: lanczos

def lanczos(A, v0, k):
    """
    Lanczos algorithm (Krylov approximation of a matrix)
        input: 
            A: matrix to approximate
            v0: initial vector (should be in matrix form) 
            k: number of Krylov steps 
        output: 
            V: matrix (large, N*k) containing the orthogonal vectors
            H: matrix (small, k*k) containing the Krylov approximation of A
            
    Author: Vasile Gradinaru, 14.12.2007 (Rennes)
    """
    print 'LANCZOS METHOD !'
    # first Krylov basis vector: the normalized start vector, kept as a matrix
    V = mat( v0.copy() / norm(v0) )
    alpha =  zeros(k)    # diagonal entries of the tridiagonal H
    beta =  zeros(k+1)   # off-diagonal entries; beta[0] stays unused
    for m in xrange(k):
        vt = multMv( A ,  V[ :, m])
        #vt = A * V[ :, m]
        # plain three-term recurrence: orthogonalize only against the two
        # previous basis vectors (no reorthogonalization, unlike orlancz)
        if m > 0: vt -= beta[m] * V[:, m-1]
        alpha[m] = (V[:, m].H * vt )[0, 0]
        vt -= alpha[m] * V[:, m]
        beta[m+1] = norm(vt)
        V =  hstack( (V, vt.copy() / beta[m+1] ) ) 
    rbeta = beta[1:-1]    
    # assemble the symmetric tridiagonal Krylov approximation of A
    H = diag(alpha) + diag(rbeta, 1) + diag(rbeta, -1)
    return V,  H
开发者ID:raoulbq,项目名称:Arnoldi_PyCpp,代码行数:28,代码来源:pyarnoldi.py


示例3: LMLdebug

    def LMLdebug(self):
        """
        LML function for debug

        Computes the (0.5-scaled) log marginal likelihood by building the
        full N*P x N*P covariance explicitly; intended as a slow reference
        implementation, hence the size guard below.
        """
        assert self.N*self.P<5000, 'gp2kronSum:: N*P>=5000'

        # vectorize phenotypes column-wise and build the fixed-effect design
        y = SP.reshape(self.Y,(self.N*self.P), order='F') 
        V = SP.kron(SP.eye(self.P),self.F)

        # full covariance: region term (Cr x Xr Xr^T) + noise term
        # (Cn + offset*I) x I_N
        XX = SP.dot(self.Xr,self.Xr.T)
        K  = SP.kron(self.Cr.K(),XX)
        K += SP.kron(self.Cn.K()+self.offset*SP.eye(self.P),SP.eye(self.N))

        # inverse of K via Cholesky
        cholK = LA.cholesky(K)
        Ki = LA.cho_solve((cholK,False),SP.eye(self.N*self.P))

        # Areml and inverse
        # NOTE(review): uses self.K here (presumably the number of fixed
        # effects, distinct from the covariance K above) -- confirm upstream
        Areml = SP.dot(V.T,SP.dot(Ki,V))
        cholAreml = LA.cholesky(Areml)
        Areml_i = LA.cho_solve((cholAreml,False),SP.eye(self.K*self.P))

        # effect sizes and z (residual after removing fixed effects)
        b = SP.dot(Areml_i,SP.dot(V.T,SP.dot(Ki,y)))
        z = y-SP.dot(V,b)
        Kiz = SP.dot(Ki,z)

        # lml: constant + two log-determinant terms (from the Cholesky
        # diagonals) + quadratic form, all halved
        lml  = y.shape[0]*SP.log(2*SP.pi)
        lml += 2*SP.log(SP.diag(cholK)).sum()
        lml += 2*SP.log(SP.diag(cholAreml)).sum()
        lml += SP.dot(z,Kiz)
        lml *= 0.5

        return lml
开发者ID:PMBio,项目名称:mtSet,代码行数:35,代码来源:gp2kronSumLR.py


示例4: __init__

    def __init__(self,data,cov_matrix=False,loc=None):
        """Fit an error ellipsoid to data, or adopt one from a covariance matrix.

        Parameters
        ----------
        data : array of data, shape=(number points,number dim)
               If cov_matrix is True then data is the covariance matrix (see below)

        Keywords
        --------
        cov_matrix : bool (optional)
                     If True data is treated as a covariance matrix with shape=(number dim, number dim)
        loc        : the mean of the data if a covariance matrix is given, shape=(number dim)
        """
        # (fix) the local previously named `eval` shadowed the builtin;
        # renamed to `evals` throughout -- behavior is unchanged.
        if cov_matrix:
            # data already is a covariance matrix: take its SVD directly
            self.dim = data.shape[0]
            self.n = None
            self.data_t = None
            self.mu = loc
            self.evec, evals, V = sl.svd(data, full_matrices=False)
            self.sigma = sqrt(evals)              # axis scales of the ellipsoid
            self.Sigma = diag(1./self.sigma)      # inverse-scale matrix for the ellipsoid equation
            self.B = dot(self.evec, self.Sigma)   # used in the ellipsoid equation
            self.Binv = sl.inv(self.B)            # also useful to have around
        else:
            self.n, self.dim = data.shape         # the shape of input data
            self.mu = data.mean(axis=0)           # the mean of the data
            self.data_t = data - self.mu          # remove the mean
            # get the eigenvectors (axes of the ellipsoid); singular values
            # are unused here -- the spread is measured from the data itself
            self.evec, evals, V = sl.svd(self.data_t.T, full_matrices=False)
            data_p = dot(self.data_t, self.evec)  # project the data onto the eigenvectors
            self.sigma = data_p.std(axis=0)       # spread along each axis (axis ratios)
            self.Sigma = diag(1./self.sigma)      # the eigenvalue matrix for the ellipsoid equation
            self.B = dot(self.evec, self.Sigma)   # used in the ellipsoid equation
            self.Binv = sl.inv(self.B)            # also useful to have around
开发者ID:cmp346,项目名称:densityplot,代码行数:32,代码来源:error_ellipse.py


示例5: newEpisode

    def newEpisode(self):
        """Prepare for a new episode: when learning, feed the GP with the last
        episode's outcome and choose new exploration-layer parameters."""
        if self.learning:
            params = ravel(self.explorationlayer.module.params)
            # scalar performance target for the finished episode: sum of the
            # rewards of the last stored sequence, scaled by 1/500
            target = ravel(sum(self.history.getSequence(self.history.getNumSequences()-1)[2]) / 500)
        
            if target != 0.0:
                self.gp.addSample(params, target)
                # keep only the 20 most recent samples in the GP training set
                if len(self.gp.trainx) > 20:
                    self.gp.trainx = self.gp.trainx[-20:, :]
                    self.gp.trainy = self.gp.trainy[-20:]
                    self.gp.noise = self.gp.noise[-20:]
                    
                self.gp._calculate()
                        
                # pick the test point with maximal predictive variance
                # (exploration-driven choice); ties broken at random
                max_cov = diag(self.gp.pred_cov).max()
                indices = where(diag(self.gp.pred_cov) == max_cov)[0]
                pick = indices[random.randint(len(indices))]
                new_param = self.gp.testx[pick]
            
                # check if that one exists already in gp training set
                if len(where(self.gp.trainx == new_param)[0]) > 0:
                    # add some normal noise to it
                    new_param += random.normal(0, 1, len(new_param))

                self.explorationlayer.module._setParameters(new_param)

            else:
                # episode carried no signal: fall back to random exploration
                self.explorationlayer.drawRandomWeights()
        
        # don't call StateDependentAgent.newEpisode() because it randomizes the params
        LearningAgent.newEpisode(self)
开发者ID:HKou,项目名称:pybrain,代码行数:32,代码来源:statedependentgp.py


示例6: diag_Ctilde_o_Sr

 def diag_Ctilde_o_Sr(self, i):
     """Kronecker product of diag(Ctilde_i) with the matching row-space factor.

     Parameter indices are laid out as [Cg | Cn]: the first block of
     indices addresses Cg parameters, the remainder Cn parameters.
     """
     n_g = self.Cg.getNumberParams()
     if i < n_g:
         return sp.kron(sp.diag(self.LcGradCgLc(i)), self.Sr())
     return sp.kron(sp.diag(self.LcGradCnLc(i - n_g)), sp.ones(self.dim_r))
开发者ID:jeffhsu3,项目名称:limix,代码行数:7,代码来源:cov2kronSum.py


示例7: _LMLgrad_lik

    def _LMLgrad_lik(self,hyperparams):
        """Gradient of the log marginal likelihood w.r.t. the likelihood
        (per-dimension noise) parameters.

        Parameters
        ----------
        hyperparams : dict of hyperparameter arrays used by get_covariances.

        Returns
        -------
        dict with key 'lik' mapping to the gradient vector, or the scalar
        1E6 when the covariance computation fails numerically.
        """
        # (fix) normalized the original mixed tab/space indentation (a syntax
        # error under Python 3), removed the unused `logtheta` local, and
        # replaced `xrange` with `range` (valid on both Python 2 and 3).
        try:
            KV = self.get_covariances(hyperparams)
        except linalg.LinAlgError:
            LG.error("exception caught (%s)" % (str(hyperparams)))
            return 1E6

        # log-determinant term
        Kd = 2*KV['Knoise']
        dldet = 0.5*(Kd*KV['Si']).sum(axis=0)
        # quadratic term
        y_roti = KV['y_roti']
        dlquad = -0.5 * (y_roti * Kd * y_roti).sum(axis=0)
        if VERBOSE:
            # brute-force recomputation of both terms, one output dimension
            # at a time, as a numerical cross-check
            dldet_  = SP.zeros([self.d])
            dlquad_ = SP.zeros([self.d])
            for d in range(self.d):
                _K = KV['K'] + SP.diag(KV['Knoise'][:,d])
                _Ki = SP.linalg.inv(_K)
                dldet_[d] = 0.5* SP.dot(_Ki,SP.diag(Kd[:,d])).trace()
                dlquad_[d] = -0.5*SP.dot(self.y[:,d],SP.dot(_Ki,SP.dot(SP.diag(Kd[:,d]),SP.dot(_Ki,self.y[:,d]))))

            assert (SP.absolute(dldet-dldet_)<1E-3).all(), 'outch'
            assert (SP.absolute(dlquad-dlquad_)<1E-3).all(), 'outch'

        LMLgrad = dldet + dlquad
        RV = {'lik': LMLgrad}

        return RV
开发者ID:AngelBerihuete,项目名称:pygp,代码行数:34,代码来源:gplvm_ard.py


示例8: build_sample_nn

def build_sample_nn():
    """Build a small pybrain demo classifier on three 2-D Gaussian classes.

    Returns
    -------
    (trainer, fnn, tstdata) : a BackpropTrainer, the softmax network it
        trains, and the held-out one-hot-encoded test set.
    """
    # (fix) normalized the original mixed tab/space indentation (a syntax
    # error under Python 3), renamed `input` which shadowed the builtin,
    # and replaced `xrange` with `range` (valid on both Python 2 and 3).
    # Three Gaussian blobs, one per class.
    means = [(-1, 0), (2, 4), (3, 1)]
    cov = [diag([1, 1]), diag([0.5, 1.2]), diag([1.5, 0.7])]
    alldata = ClassificationDataSet(2, 1, nb_classes=3)
    for n in range(400):
        for klass in range(3):
            sample = multivariate_normal(means[klass], cov[klass])
            alldata.addSample(sample, [klass])

    # 25% test / 75% train split.
    tstdata_temp, trndata_temp = alldata.splitWithProportion(0.25)

    # splitWithProportion returns SupervisedDataSets; rebuild proper
    # ClassificationDataSets sample by sample.
    tstdata = ClassificationDataSet(2, 1, nb_classes=3)
    for n in range(0, tstdata_temp.getLength()):
        tstdata.addSample(tstdata_temp.getSample(n)[0], tstdata_temp.getSample(n)[1])

    trndata = ClassificationDataSet(2, 1, nb_classes=3)
    for n in range(0, trndata_temp.getLength()):
        trndata.addSample(trndata_temp.getSample(n)[0], trndata_temp.getSample(n)[1])

    # one-hot encode the class targets for the softmax output layer
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()

    fnn = buildNetwork(trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer)

    trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)

    return trainer, fnn, tstdata
开发者ID:jamesfisk,项目名称:thesisc,代码行数:28,代码来源:neural.py


示例9: get_whitener

def get_whitener(A, k):
    """Return (W, Wc): W whitens A (i.e. W^T A W = I) and Wc undoes it.

    Assumes A has rank k.
    """
    U, D, _ = svdk(A, k)
    root_d = sqrt(D)
    # scale the left singular vectors by 1/sqrt(D) (whitening) and by
    # sqrt(D) (coloring), respectively
    return U.dot(diag(1. / root_d)), U.dot(diag(root_d))
开发者ID:sidaw,项目名称:polymom,代码行数:8,代码来源:MixtureModelSpectral.py


示例10: segmented

def segmented():
    """Normalized-cuts style segmentation of the module-level image `img`.

    Builds a pixel-affinity graph with the module-level `weight` function,
    computes the symmetrically normalized Laplacian, and splits pixels by
    the sign of the second-smallest eigenvector.

    Returns
    -------
    (darkImg, flatImg, brightImg) : the two complementary segment images
        (reshaped to height x height) and the untouched flattened image.
    """
    radius = 5
    # NOTE(review): sigmaI/sigmaX are never read in this function; they look
    # like parameters of `weight` -- confirm whether they belong elsewhere.
    sigmaI = 0.02
    sigmaX = 3.0
    height = img.shape[0]
    width = img.shape[1]
    flatImg = img.flatten()
    # BUGFIX: the original aliased darkImg and brightImg to flatImg, so all
    # three "outputs" shared one buffer and every pixel ended up zeroed in
    # both segments; independent copies restore the intended behavior.
    darkImg = flatImg.copy()
    brightImg = flatImg.copy()

    nodes = img.flatten()

    W = spar.lil_matrix((nodes.size, nodes.size), dtype=float)
    D = sp.zeros((1, nodes.size))

    # Accumulate the affinity matrix W and the degree vector D over a
    # (2*radius)^2 neighbourhood of every pixel.
    for row in range(height):
        for col in range(width):
            for k in range(row - radius, row + radius):
                for l in range(col - radius, col + radius):
                    try:
                        w = weight(row, col, k, l)
                        W[row*width + col, k*width + l] = w
                        D[0, row*width + col] += w
                    except Exception:
                        # neighbours falling outside the image are skipped
                        # (was a bare `except:`, which also swallowed
                        # KeyboardInterrupt/SystemExit)
                        continue

    D = spar.spdiags(D, 0, nodes.size, nodes.size)

    Q = D - W

    D1 = D.todense()
    Q1 = Q.todense()

    # Symmetric normalization: D^{-1/2} (D - W) D^{-1/2}
    diags = sp.diag(D1)
    DminusHalf = sp.diag(diags**-0.5)

    segQ = sp.dot(sp.dot(DminusHalf, Q1), DminusHalf)
    vals, vecs = la.eig(segQ)

    # Partition on the eigenvector of the second-smallest eigenvalue.
    # NOTE(review): `vecs[vecind]` indexes a row, while eig returns
    # eigenvectors as columns (`vecs[:, vecind]`) -- kept as the original.
    vecind = sp.argsort(vals)[1]
    theVec = vecs[vecind]

    for i in range(0, height**2):
        if theVec[i] < 0:
            darkImg[i] = 0.0
        else:
            brightImg[i] = 0.0

    darkImg = sp.reshape(darkImg, (height, height))
    brightImg = sp.reshape(brightImg, (height, height))

    return darkImg, flatImg, brightImg
开发者ID:snowdj,项目名称:byu_macro_boot_camp,代码行数:58,代码来源:Lab16b.py


示例11: exact_moments

def exact_moments(A, w):
    """Get the exact moments of a components distribution.

    Parameters
    ----------
    A : component matrix (columns combined with weights w).
    w : weight vector.

    Returns
    -------
    P : second-moment matrix, A diag(w) A^T.
    T : callable; T(theta) = A diag(w) diag(A^T theta) A^T, i.e. the third
        moment contracted along one mode with theta.
    """
    # (fix) removed the unused local `k = len(w)` and the commented-out
    # explicit-tensor construction of T.
    P = A.dot(diag(w)).dot(A.T)
    T = lambda theta: A.dot(diag(w)).dot(diag(A.T.dot(theta))).dot(A.T)

    return P, T
开发者ID:arunchaganty,项目名称:spectral,代码行数:9,代码来源:SphericalGaussians.py


示例12: MikotaPair

def MikotaPair(n):
    """Return the Mikota matrix pair (A, B) of size n.

    A convenient eigenvalue test problem: the generalized eigenvalues of
    (A, B) are the squares of the integers 1, 2, ..., n.
    """
    idx = arange(1, n + 1)
    B = diag(1. / idx)
    off = arange(n - 1, 0, -1)
    main = arange(2 * n - 1, 0, -2)
    # tridiagonal A: main diagonal `main`, sub/super diagonals -off
    A = diag(main) - diag(off, -1) - diag(off, 1)
    return A, B
开发者ID:317070,项目名称:scipy,代码行数:9,代码来源:test_lobpcg.py


示例13: MikotaPair

def MikotaPair(n, dtype=np.dtype("d")):
    """Return the Mikota matrix pair (A, B) of size n with the given dtype.

    A convenient eigenvalue test problem: the generalized eigenvalues of
    (A, B) are the squares of the integers 1, 2, ..., n.
    """
    idx = np.arange(1, n + 1, dtype=dtype)
    B = diag(1. / idx)
    off = np.arange(n - 1, 0, -1, dtype=dtype)
    main = np.arange(2 * n - 1, 0, -2, dtype=dtype)
    # tridiagonal A: main diagonal `main`, sub/super diagonals -off
    A = diag(main) - diag(off, -1) - diag(off, 1)
    return A, B
开发者ID:primme,项目名称:primme,代码行数:9,代码来源:tests.py


示例14: main

def main():
    """Train a softmax feed-forward net on three 2-D Gaussian classes and
    redraw its decision regions after every epoch (pybrain + pylab demo)."""
    # three Gaussian blobs, one per class
    means = [(-1,0),(2,4),(3,1)]
    cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])]
    alldata = ClassificationDataSet(2, 1, nb_classes=3)
    for n in xrange(400):
        for klass in range(3):
            input = multivariate_normal(means[klass],cov[klass])
            alldata.addSample(input, [klass])
    # 25% test / 75% train split; one-hot encode the class targets
    tstdata, trndata = alldata.splitWithProportion( 0.25 )
    trndata._convertToOneOfMany( )
    tstdata._convertToOneOfMany( )
    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample (input, target, class):"
    print trndata['input'][0], trndata['target'][0], trndata['class'][0]
    
    # one hidden layer of 5 units, softmax output
    fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
    # regular grid covering the data range, used to plot decision regions
    ticks = arange(-3.,6.,0.2)
    X, Y = meshgrid(ticks, ticks)
    # need column vectors in dataset, not arrays
    griddata = ClassificationDataSet(2,1, nb_classes=3)
    for i in xrange(X.size):
        griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])
    griddata._convertToOneOfMany()  # this is still needed to make the fnn feel comfy
    
    # train for 20 epochs, reporting errors and replotting each time
    for i in range(20):
        trainer.trainEpochs(1)
    
        trnresult = percentError( trainer.testOnClassData(),
                                  trndata['class'] )
        tstresult = percentError( trainer.testOnClassData(
               dataset=tstdata ), tstdata['class'] )
    
        print "epoch: %4d" % trainer.totalepochs, \
              "  train error: %5.2f%%" % trnresult, \
              "  test error: %5.2f%%" % tstresult
        
        # classify the grid and draw filled contours of the predicted class
        out = fnn.activateOnDataset(griddata)
        out = out.argmax(axis=1)  # the highest output activation gives the class
        out = out.reshape(X.shape)
        figure(1)
        ioff()  # interactive graphics off
        clf()   # clear the plot
        hold(True) # overplot on
        for c in [0,1,2]:
            here, _ = where(tstdata['class']==c)
            plot(tstdata['input'][here,0],tstdata['input'][here,1],'o')
        if out.max()!=out.min():  # safety check against flat field
            contourf(X, Y, out)   # plot the contour
        ion()   # interactive graphics on
        draw()  # update the plot
        
    ioff()
    show()
开发者ID:jac241,项目名称:Machine-Learning-Neural-Network,代码行数:55,代码来源:nnexample.py


示例15: exact_moments

def exact_moments(alphas, topics):
    """Exact low-order moments of a topic model with Dirichlet weights.

    Returns
    -------
    P : second-moment matrix O diag(alphas) O^T / ((a0+1)*a0).
    T : callable; T(theta) is the third moment contracted with theta.
    """
    a0 = alphas.sum()
    O = topics

    # Dirichlet moment normalization constants
    c2 = 1/((a0 + 1)*a0)
    c3 = 2/((a0 + 2)*(a0 + 1)*a0)

    P = c2 * O.dot(diag(alphas)).dot(O.T)
    T = lambda theta: c3 * O.dot(diag(O.T.dot(theta))).dot(diag(alphas)).dot(O.T)

    return P, T
开发者ID:arunchaganty,项目名称:spectral,代码行数:11,代码来源:TwoSVD.py


示例16: diag_Ctilde_o_Sr

 def diag_Ctilde_o_Sr(self, i):
     """Kronecker product of diag(Ctilde_i) with the matching row-space factor.

     Parameter indices are laid out as [Cr | Cg | Cn]; the index `i` is
     mapped to the covariance term it belongs to.
     """
     n_r = self.Cr.getNumberParams()
     n_g = self.Cg.getNumberParams()
     if i < n_r:
         return sp.kron(sp.diag(self.LcGradCrLc(i)), self.diagWrWr())
     if i < n_r + n_g:
         return sp.kron(sp.diag(self.LcGradCgLc(i - n_r)), self.Sr())
     return sp.kron(sp.diag(self.LcGradCnLc(i - n_r - n_g)), sp.ones(self.dim_r))
开发者ID:BioinformaticsArchive,项目名称:limix,代码行数:12,代码来源:cov3kronSumLR.py


示例17: _logDerivsFactorSigma

    def _logDerivsFactorSigma(self, samples, mu, invSigma, factorSigma):
        """Log-derivatives w.r.t. the components of the factorized covariance.

        Intended as a faster equivalent of the Vanilla implementation.
        """
        out = zeros((len(samples), self.numDistrParams - self.numParameters))
        A_inv = inv(factorSigma)
        # diagonal part of the inverse factor, as a full matrix
        A_inv_diag = diag(diag(A_inv))

        for row, smpl in enumerate(samples):
            shifted = dot(A_inv.T, smpl - mu)
            grad = outer(shifted, dot(A_inv, shifted)) - A_inv_diag
            out[row] = triu2flat(grad)
        return out
开发者ID:DanSGraham,项目名称:code,代码行数:12,代码来源:nes.py


示例18: get_dummy_data

def get_dummy_data():
    """Generate a 3-class 2-D Gaussian toy dataset.

    Returns
    -------
    X : list of 1200 2-D sample points (400 draws per class).
    y : list of 1200 integer class labels in {0, 1, 2}, interleaved per draw.
    """
    # (fix) normalized the original mixed tab/space indentation (a syntax
    # error under Python 3), renamed `input` which shadowed the builtin,
    # and replaced `xrange` with `range` (valid on both Python 2 and 3).
    means = [(-1, 0), (2, 4), (3, 1)]
    cov = [diag([1, 1]), diag([0.5, 1.2]), diag([1.5, 0.7])]
    X = []
    y = []

    for n in range(400):
        for klass in range(3):
            point = multivariate_normal(means[klass], cov[klass])
            X.append(point)
            y.append(klass)
    return X, y
开发者ID:jamesfisk,项目名称:thesisc,代码行数:12,代码来源:neural.py


示例19: ampfit

def ampfit(data, covariance, theory, rank_thresh=1e-12, diag_only=False):
    """Fits the amplitude of the theory curve to the data.

    Finds `amp` such that `amp`*`theory` is the best fit to `data`.

    Parameters
    ----------
    data : 1D vector of measurements.
    covariance : square matrix of shape (len(data), len(data)).
    theory : 1D vector of the same shape as `data`.
    rank_thresh : singular values of `covariance` above this threshold
        count towards the degrees of freedom.
    diag_only : if True, discard the off-diagonal covariance entries.

    Returns
    -------
    dict with keys "amp", "error", "chi2", "dof" and "pte":
    amp : float
        Fitted amplitude.
    error : float
        Error on fitted amplitude.
    """

    data = sp.asarray(data)
    # deep-copied so the diag_only branch cannot clobber the caller's array
    covariance = sp.asarray(copy.deepcopy(covariance))
    theory = sp.asarray(theory)

    if len(data.shape) != 1:
        raise ValueError("`data` must be a 1D vector.")

    n = len(data)

    if data.shape != theory.shape:
        raise ValueError("`theory` must be the same shape as `data`.")

    if covariance.shape != (n,n):
        msg = "`covariance` must be a square matrix compatible with data."
        raise ValueError(msg)

    if diag_only:
        covariance = sp.diag(sp.diag(covariance))
        print data
        print sp.diag(sp.sqrt(covariance))

    # generalized least squares: amp = t^T C^-1 d / (t^T C^-1 t),
    # with variance 1 / (t^T C^-1 t)
    covariance_inverse = linalg.inv(covariance)
    weighted_data = sp.dot(covariance_inverse, data)
    amp = sp.dot(theory, weighted_data)
    normalization = sp.dot(covariance_inverse, theory)
    normalization = sp.dot(theory, normalization)
    amp /= normalization
    error = sp.sqrt(1/normalization)

    # effective rank of the covariance gives the degrees of freedom;
    # one is subtracted below for the fitted amplitude
    u, s, v = np.linalg.svd(covariance)
    dof = np.sum(s > rank_thresh)
    resid = data - amp * theory
    chi2 = sp.dot(covariance_inverse, resid)
    chi2 = sp.dot(resid, chi2)
    # NOTE(review): sp.stats.chisqprob was deprecated and later removed from
    # scipy (use scipy.stats.chi2.sf instead) -- left as-is here
    pte = sp.stats.chisqprob(chi2, dof - 1)

    return {"amp":amp, "error":error, \
            "chi2":chi2, "dof":dof - 1, \
            "pte":pte}
开发者ID:OMGitsHongyu,项目名称:analysis_IM,代码行数:52,代码来源:misc.py


示例20: _update_cache

    def _update_cache(self):
        """
        Update cache

        Recomputes the cached row/column rotations, spectra and rotated
        phenotypes, but only for the parts invalidated by changes to the
        region design Xr or the covariance parameters; resets the change
        flags afterwards.
        """
        cov_params_have_changed = self.Cr.params_have_changed or self.Cn.params_have_changed

        if self.Xr_has_changed:
            start = TIME.time()
            """ Row SVD Bg + Noise """
            # row rotation from the SVD of Xr; squared singular values,
            # zero-padded up to N, form the row spectrum Srstar
            Urstar,S,V = NLA.svd(self.Xr)
            self.cache['Srstar'] = SP.concatenate([S**2,SP.zeros(self.N-S.shape[0])])
            self.cache['Lr']     = Urstar.T
            self.mean.setRowRotation(Lr=self.cache['Lr'])

            smartSum(self.time,'cache_XXchanged',TIME.time()-start)
            smartSum(self.count,'cache_XXchanged',1)
        
        if cov_params_have_changed:
            start = TIME.time()
            """ Col SVD Noise """
            # eigendecomposition of the regularized noise covariance;
            # US2 / USi2 are its square root and inverse square root
            # NOTE(review): US2 is computed but never used below
            S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
            self.cache['Sc2'] = S2
            US2   = SP.dot(U2,SP.diag(SP.sqrt(S2)))
            USi2  = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
            """ Col SVD region """
            # SVD of the noise-whitened region parameters gives the column
            # rotation Lc and spectrum Scstar
            A     = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
            Astar = SP.dot(USi2.T,A)
            Ucstar,S,V = NLA.svd(Astar)
            self.cache['Scstar'] = SP.concatenate([S**2,SP.zeros(self.P-S.shape[0])])
            self.cache['Lc']     = SP.dot(Ucstar.T,USi2.T)

            """ pheno """
            self.mean.setColRotation(self.cache['Lc'])


        if cov_params_have_changed or self.Xr_has_changed:
            """ S """
            # diagonal of the rotated covariance: kron of the two spectra + 1,
            # with d/D its (reciprocal) elementwise inverse
            self.cache['s'] = SP.kron(self.cache['Scstar'],self.cache['Srstar'])+1
            self.cache['d'] = 1./self.cache['s']
            self.cache['D'] = SP.reshape(self.cache['d'],(self.N,self.P), order='F')

            """ pheno """
            # rotated phenotypes and their D-weighted version
            self.cache['LY']  = self.mean.evaluate()
            self.cache['DLY'] = self.cache['D']*self.cache['LY']

            smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
            smartSum(self.count,'cache_colSVDpRot',1)

        self.Y_has_changed = False
        self.Xr_has_changed = False
        self.Cr.params_have_changed = False
        self.Cn.params_have_changed = False
开发者ID:PMBio,项目名称:mtSet,代码行数:52,代码来源:gp2kronSumSvd.py



注:本文中的scipy.diag函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python scipy.diff函数代码示例发布时间:2022-05-27
下一篇:
Python scipy.delete函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap