
Python linalg.slogdet Function Code Examples


This article compiles typical usage examples of the numpy.linalg.slogdet function in Python. If you are wondering how to use slogdet, or are looking for working examples of slogdet in practice, the curated code examples below may help.



The following presents 20 code examples of the slogdet function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
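
Before diving into the project excerpts, here is a minimal sketch of the basic API (my own illustrative snippet, not taken from any of the projects below): numpy.linalg.slogdet returns a pair (sign, log|det|), which is numerically safer than numpy.linalg.det when the determinant is very large or very small.

import numpy as np
from numpy.linalg import slogdet

A = np.array([[4.0, 1.0],
              [2.0, 3.0]])        # det(A) = 10
sign, logabsdet = slogdet(A)      # sign of det and log of its absolute value
print(sign, logabsdet)            # 1.0  2.302585...
print(sign * np.exp(logabsdet))   # recovers det(A) = 10.0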

Example 1: logdet_low_rank

def logdet_low_rank(Ainv, U, C, V, diag=False):
    """

    logdet(A+UCV) = logdet(C^{-1} + V A^{-1} U) +  logdet(C) + logdet(A).

    :param Ainv: NxN inverse of A (or, if diag=True, a length-N array holding the diagonal of A^{-1})
    :param U: NxK
    :param C: KxK
    :param V: KxN
    :param diag: if True, treat Ainv as the diagonal of A^{-1}
    :return: log-determinant of A + UCV
    """
    Cinv = inv(C)
    sC, ldC = slogdet(C)
    assert sC > 0

    if diag:
        ldA = -log(Ainv).sum()

        tmp1 = einsum('ij,j,jk->ik', V, Ainv, U)
        s1, ld1 = slogdet(Cinv + tmp1)
        assert s1 > 0

    else:
        sAinv, ldAinv = slogdet(Ainv)
        ldA = -ldAinv
        assert sAinv > 0

        s1, ld1 = slogdet(Cinv + V.dot(Ainv).dot(U))
        assert s1 > 0

    return  ld1 + ldC + ldA
Author: slinderman, Project: eigenglm, Lines of code: 31, Source file: utils.py
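
A quick way to sanity-check this function is to compare it with a direct slogdet of A + UCV. The following is an illustrative sketch only (it assumes logdet_low_rank and its numpy imports, inv/slogdet/log/einsum, are in scope):

import numpy as np
from numpy.linalg import inv, slogdet

rng = np.random.default_rng(0)
N, K = 6, 2
M = rng.standard_normal((N, N))
A = M @ M.T + N * np.eye(N)                  # symmetric positive definite
U = rng.standard_normal((N, K))
C = np.eye(K)
V = U.T

direct = slogdet(A + U @ C @ V)[1]
low_rank = logdet_low_rank(inv(A), U, C, V)  # function from Example 1
print(np.allclose(direct, low_rank))         # expected: True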


Example 2: initialize_params

    def initialize_params(self):
        """ 
        Initialize the params.Sigma_s/Sigma_i, 
        params.J_i/J_s, params.logdet_Sigma_i/logdet_Sigma_s,
        based on i_std and s_std

        """
        params = self.params
        dim_s = self.dim_s
        dim_i = self.dim_i
        s_std = self.s_std
        i_std = self.i_std
        nSuperpixels = self.nSuperpixels

        # Covariance for each superpixel is a diagonal matrix
        for i in range(dim_s):
            params.Sigma_s.cpu[:,i,i].fill(s_std**2)  
            params.prior_sigma_s_sum.cpu[:,i,i].fill(s_std**4) 

        for i in range(dim_i):
            params.Sigma_i.cpu[:,i,i].fill((i_std)**2)
        params.Sigma_i.cpu[:,1,1].fill((i_std/2)**2) # account for scale differences between the L, A, B channels

        #calculate the inverse of covariance
        params.J_i.cpu[:]=map(inv,params.Sigma_i.cpu)
        params.J_s.cpu[:]=map(inv,params.Sigma_s.cpu)
        
        # calculate the log of the determinant of covariance
        for i in range(nSuperpixels):
            junk,params.logdet_Sigma_i.cpu[i] = slogdet(params.Sigma_i.cpu[i])
            junk,params.logdet_Sigma_s.cpu[i] = slogdet(params.Sigma_s.cpu[i])
        del junk

        self.update_params_cpu2gpu()
Author: freifeld, Project: fastSCSP, Lines of code: 34, Source file: Superpixels_NaN.py


Example 3: drawBeta

def drawBeta(k,s,w,size=1):
    """Draw beta from its distribution (Eq.9 Rasmussen 2000) using ARS
    Make it robust with an expanding range in case of failure"""
    nd = w.shape[0]
    
    # precompute some things for speed
    logdetw = slogdet(w)[1]
    temp = 0
    for sj in s:
        sj = np.reshape(sj,(nd,nd))
        temp += slogdet(sj)[1]
        temp -= np.trace(np.dot(w,sj))
    
    lb = nd - 1.0
    flag = True
    cnt = 0
    while flag:
        xi = lb + np.logspace(-3-cnt,1+cnt,200)       # update range if needed
        flag = False
        try:
            ars = ARS(logpbeta,logpbetaprime,xi=xi,lb=lb,ub=np.inf, \
                k=k, s=s, w=w, nd=nd, logdetw=logdetw, temp=temp)
        except:
            cnt += 1
            flag = True

    # draw beta but also pass random seed to ARS code
    return ars.draw(size,np.random.randint(MAXINT))
Author: chrismessenger, Project: igmm, Lines of code: 28, Source file: igmm.py


Example 4: loglike

    def loglike(self, params):
        """
        Returns float
        Loglikelihood used in latent factor models

        Parameters
        ----------
        params : list
            Values of parameters to pass into masked elements of array

        Returns
        -------
        loglikelihood : float
        """

        latent = self.latent
        per = self.periods
        var_data_vert = self.var_data_vert
        var_data_vertm1 = self.var_data_vertm1

        lam_0, lam_1, delta_0, delta_1, mu, phi, \
            sigma, dtype = self.params_to_array(params)

        if self.fast_gen_pred:
            a_solve, b_solve = self.opt_gen_pred_coef(lam_0, lam_1, delta_0,
                                                      delta_1, mu, phi, sigma,
                                                      dtype)

        else:
            a_solve, b_solve = self.gen_pred_coef(lam_0, lam_1, delta_0,
                                                  delta_1, mu, phi, sigma,
                                                  dtype)
        # first solve for unknown part of information vector
        lat_ser, jacob, yield_errs  = self._solve_unobs(a_in=a_solve,
                                                        b_in=b_solve,
                                                        dtype=dtype)

        # here is the likelihood that needs to be used
        # use two matrices to take the difference
        var_data_use = var_data_vert.join(lat_ser)[1:]
        var_data_usem1 = var_data_vertm1.join(lat_ser.shift())[1:]

        errors = var_data_use.values.T - mu - np.dot(phi,
                                                     var_data_usem1.values.T)
        sign, j_logdt = nla.slogdet(jacob)
        j_slogdt = sign * j_logdt

        sign, sigma_logdt = nla.slogdet(np.dot(sigma, sigma.T))
        sigma_slogdt = sign * sigma_logdt

        var_yields_errs = np.var(yield_errs, axis=1)

        like = -(per - 1) * j_slogdt - (per - 1) * 1.0 / 2 * sigma_slogdt - \
               1.0 / 2 * np.sum(np.dot(np.dot(errors.T, \
               la.inv(np.dot(sigma, sigma.T))), errors)) - (per - 1) / 2.0 * \
               np.log(np.sum(var_yields_errs)) - 1.0 / 2 * \
               np.sum(yield_errs**2/var_yields_errs[None].T)

        return like
Author: bartbkr, Project: affine, Lines of code: 59, Source file: affine.py


Example 5: logdet_low_rank2

def logdet_low_rank2(Ainv, U, C, V, diag=False):
    '''
    computes logdet(A+UCV) using https://en.wikipedia.org/wiki/Matrix_determinant_lemma
    '''
    if diag:
        ldA = -log(Ainv).sum()
        temp = C.dot(V).dot(U * Ainv[:,None])
    else:
        ldA = -slogdet(Ainv)[1]
        temp = C.dot(V).dot(Ainv).dot(U)
    temp.flat[::temp.shape[0]+1] += 1
    return slogdet(temp)[1] + ldA
Author: sheqi, Project: pyglm, Lines of code: 12, Source file: utils.py
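
In the diag=True branch, Ainv is expected to be a 1-D array holding the diagonal of A^{-1}. A small illustrative check of that path (assuming logdet_low_rank2 and its numpy imports are in scope):

import numpy as np
from numpy.linalg import slogdet

rng = np.random.default_rng(1)
N, K = 5, 2
a_diag = rng.uniform(1.0, 2.0, size=N)       # diagonal of A
U = rng.standard_normal((N, K))
C = np.eye(K)
V = U.T

direct = slogdet(np.diag(a_diag) + U @ C @ V)[1]
fast = logdet_low_rank2(1.0 / a_diag, U, C, V, diag=True)  # pass the diagonal of A^{-1}
print(np.allclose(direct, fast))             # expected: True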


Example 6: train_sgd

	def train_sgd(self, X, **kwargs):
		# hyperparameters
		max_iter = kwargs.get('max_iter', 1)
		batch_size = kwargs.get('batch_size', min([100, X.shape[1]]))
		step_width = kwargs.get('step_width', 0.001)
		momentum = kwargs.get('momentum', 0.9)
		shuffle = kwargs.get('shuffle', True)
		pocket = kwargs.get('pocket', shuffle)

		# completed basis and filters
		A = self.A
		W = inv(A)

		# initial direction of momentum
		P = 0.

		if pocket:
			energy = mean(self.prior_energy(dot(W, X))) - slogdet(W)[1]

		for j in range(max_iter):
			if shuffle:
				# randomize order of data
				X = X[:, permutation(X.shape[1])]

			for i in range(0, X.shape[1], batch_size):
				batch = X[:, i:i + batch_size]

				if not batch.shape[1] < batch_size:
					# calculate gradient
					P = momentum * P + A.T - \
						dot(self.prior_energy_gradient(dot(W, batch)), batch.T) / batch_size

					# update parameters
					W += step_width * P
					A = inv(W)

		if pocket:
			# test for improvement of lower bound
			if mean(self.prior_energy(dot(W, X))) - slogdet(W)[1] > energy:
				if Distribution.VERBOSITY > 0:
					print 'No improvement.'

				# don't update parameters
				return False

		# update linear features
		self.A = A

		return True
Author: lucastheis, Project: isa, Lines of code: 49, Source file: ica.py


Example 7: mvnkld

def mvnkld(mu0, mu1, sigma0, sigma1):
    """

    Returns the Kullback-Leibler Divergence (KLD) between two normal distributions.

    """
    k = len(mu0)
    assert k == len(mu1)
    delta = mu1 - mu0
    (sign0, logdet0) = linalg.slogdet(sigma0)
    (sign1, logdet1) = linalg.slogdet(sigma1)
    lndet = logdet0 - logdet1
    A = trace(linalg.solve(sigma1, sigma0))
    B = delta.T.dot(linalg.solve(sigma1, delta))
    return 0.5 * (A + B - k - lndet)
Author: pjozog, Project: PylabUtils, Lines of code: 15, Source file: mvnkld.py
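
Two basic properties make for an easy sanity check: the KL divergence of a distribution with itself is zero, and the divergence is generally not symmetric. An illustrative sketch (assuming mvnkld and its numpy imports are in scope):

import numpy as np

mu = np.array([0.0, 1.0])
Sigma = np.array([[2.0, 0.3],
                  [0.3, 1.0]])

print(np.isclose(mvnkld(mu, mu, Sigma, Sigma), 0.0))   # identical Gaussians -> 0

mu2 = np.array([1.0, 0.0])
Sigma2 = np.eye(2)
print(mvnkld(mu, mu2, Sigma, Sigma2),
      mvnkld(mu2, mu, Sigma2, Sigma))                  # generally two different values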


Example 8: calcNumericFit

def calcNumericFit(xVal, yVal, yErr):
    a11 = 0
    a21 = 0
    a12 = 0
    a22 = 0

    # Compute the coefficient matrix
    for i in range(len(xVal)):
        a22 = a22 + 1/(yErr[i]**2)
        a12 = a12 + xVal[i]/yErr[i]**2
        a21 = a21 + xVal[i]/yErr[i]**2
        a11 = a11 + xVal[i]**2/yErr[i]**2
    (sign, logdet) = linalg.slogdet([[a11, a21], [a12, a22]]) 
    detCoeffMat = sign * np.exp(logdet)

    xy = 0
    xyxy = 0
    y0 = 0
    # Compute the coefficients
    for i in range(len(xVal)):
        xy = xy + xVal[i]*yVal[i]/(yErr[i]**2)
        xyxy = xyxy + xVal[i]*yVal[i]/yErr[i]**4
        y0 = y0 + yVal[i]/(yErr[i]**2)

    aBest = (1/detCoeffMat) * (xy * a22 - a21 * y0)
    bBest = (1/detCoeffMat) * (a11 * y0 - a21 * xy)

    # Compute the uncertainties
    aErr = np.sqrt(1/detCoeffMat * a22)
    bErr = np.sqrt(1/detCoeffMat * a11)
    return [aBest, bBest, aErr, bErr]
Author: Encesat, Project: Datenanalyse, Lines of code: 31, Source file: Datenauswertung.py
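
The coefficient matrix built here is the normal-equation matrix of an error-weighted (chi-square) straight-line fit, so the slope aBest and intercept bBest should match a weighted polynomial fit of degree 1. An illustrative cross-check (assuming calcNumericFit and its imports are in scope):

import numpy as np

rng = np.random.default_rng(2)
x = np.linspace(0.0, 10.0, 20)
yerr = np.full_like(x, 0.5)
y = 1.7 * x + 0.3 + rng.normal(0.0, yerr)

aBest, bBest, aErr, bErr = calcNumericFit(x, y, yerr)
slope, intercept = np.polyfit(x, y, 1, w=1.0 / yerr)    # w = 1/sigma gives the chi-square fit
print(np.allclose([aBest, bBest], [slope, intercept]))  # expected: True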


Example 9: posdef_diag_dom

def posdef_diag_dom(n, m=10, s=None):
    """Generates a positive-definite, diagonally dominant n x n matrix.
    Arguments:
        n - width/height of the matrix
        m - additional multiplier for diagonal-dominance control
            (shouldn't be less than 10, though)
        s - optional seed for RNG
    """
    if m < 10:
        print "Multiplier should be >= 10. Using m=10 instead."
        m = 10
    np.random.seed(s)  # re-seeding RNG is needed for multiprocessing
    while True:
        signs = np.random.randint(2, size=(n, n))
        f = (signs == 0)
        signs[f] = -1
        signs = np.triu(signs, k=1)
        a = np.random.random((n, n))
        u = a * signs
        l = u.T
        a = l + u
        for i, row in enumerate(a):
            a[i, i] = (row * row).sum() * m
        if la.slogdet(a) != (0, np.inf):
            break
    return a
Author: xor-xor, Project: gauss-jordan, Lines of code: 26, Source file: nmgj_gen_mtx.py


Example 10: mvnlogpdf_p

def mvnlogpdf_p (X, mu, PrecMat):
    """
    Multivariate Normal Log PDF

    Args:
        X      : NxD matrix of input data. Each ROW is a single sample.
        mu     : Dx1 vector for the mean.
        PrecMat: DxD precision matrix.

    Returns:
        Nx1 vector of log probabilities.
    """
    D = PrecMat.shape[0]
    X = _check_single_data(X, D)
    N = len(X)

    _, neglogdet = linalg.slogdet(PrecMat)
    normconst = -0.5 * (D * np.log(2 * constants.pi) - neglogdet)

    logpdf = np.empty((N, 1))
    for n, x in enumerate(X):
        d = x - mu
        logpdf[n] = normconst - 0.5 * d.dot(PrecMat.dot(d))

    return logpdf
Author: markvdw, Project: mltools, Lines of code: 25, Source file: prob.py
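
If SciPy is available, the result can be cross-checked against scipy.stats.multivariate_normal by converting the precision matrix back to a covariance. An illustrative sketch (it assumes mvnlogpdf_p and its module helper _check_single_data are importable and accept an NxD array):

import numpy as np
from numpy.linalg import inv
from scipy.stats import multivariate_normal

mu = np.array([0.5, -1.0])
Sigma = np.array([[1.5, 0.2],
                  [0.2, 0.8]])
X = np.array([[0.0, 0.0],
              [1.0, -2.0]])

ours = mvnlogpdf_p(X, mu, inv(Sigma)).ravel()
ref = multivariate_normal.logpdf(X, mean=mu, cov=Sigma)
print(np.allclose(ours, ref))   # expected: True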


Example 11: clik

def clik(lam,n,n2,n_eq,bigE,I,WS):
    """ Concentrated (negative) log-likelihood for SUR Error model
    
    Parameters
    ----------
    lam         : n_eq x 1 array of spatial autoregressive parameters
    n           : number of observations in each cross-section
    n2          : n/2
    n_eq        : number of equations
    bigE        : n by n_eq matrix with vectors of residuals for 
                  each equation
    I           : sparse Identity matrix
    WS          : sparse spatial weights matrix
    
    Returns
    -------
    -clik       : negative (for minimize) of the concentrated
                  log-likelihood function
    
    """
    WbigE = WS * bigE
    spfbigE = bigE - WbigE * lam.T
    sig = np.dot(spfbigE.T,spfbigE) / n
    ldet = la.slogdet(sig)[1]
    logjac = jacob(lam,n_eq,I,WS)
    clik = - n2 * ldet + logjac
    return -clik  # negative for minimize
Author: jGaboardi, Project: pysal, Lines of code: 27, Source file: sur_error.py


Example 12: mvnlogpdf

def mvnlogpdf (X, mu, Sigma):
    """
    Multivariate Normal Log PDF

    Args:
        X    : NxD matrix of input data. Each ROW is a single sample.
        mu   : Dx1 vector for the mean.
        Sigma: DxD covariance matrix.

    Returns:
        Nx1 vector of log probabilities.
    """
    D = Sigma.shape[0]
    X = _check_single_data(X, D)
    N = len(X)

    _, logdet = linalg.slogdet(Sigma)
    normconst = -0.5 * (D * np.log(2 * constants.pi) + logdet)

    iS = linalg.inv(Sigma)
    logpdf = np.empty((N, 1))
    for n, x in enumerate(X):
        d = x - mu
        logpdf[n] = normconst - 0.5 * d.dot(iS.dot(d))

    return logpdf
Author: markvdw, Project: mltools, Lines of code: 26, Source file: prob.py
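
This covariance-parameterized version should agree with the precision-parameterized mvnlogpdf_p from Example 10 when both describe the same distribution; a short illustrative consistency check (assuming both functions from prob.py are in scope):

import numpy as np
from numpy.linalg import inv

mu = np.array([0.0, 2.0])
Sigma = np.array([[1.0, 0.4],
                  [0.4, 2.0]])
X = np.array([[0.5, 1.0],
              [-1.0, 3.0]])

print(np.allclose(mvnlogpdf(X, mu, Sigma),
                  mvnlogpdf_p(X, mu, inv(Sigma))))   # expected: True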


Example 13: preprocess

 def preprocess(self):
     self.VarList = tuple(self.Vars)
     self.NumVars = len(self.VarList)
     self.VarVector = BlockMatrix((tuple(self.Vars[var] for var in self.VarList),))
     self.NumDims = self.VarVector.shape[1]
     self.Mean = BlockMatrix((tuple(self.Param[('Mean', var)] for var in self.VarList),))
     self.DemeanedVarVector = self.VarVector - self.Mean
     cov = [self.NumVars * [None] for _ in range(self.NumVars)]   # careful not to create same mutable object
     for i in range(self.NumVars):
         for j in range(i):
             if ('Cov', self.VarList[i], self.VarList[j]) in self.Param:
                 cov[i][j] = self.Param[('Cov', self.VarList[i], self.VarList[j])]
                 cov[j][i] = cov[i][j].T
             else:
                 cov[j][i] = self.Param[('Cov', self.VarList[j], self.VarList[i])]
                 cov[i][j] = cov[j][i].T
         cov[i][i] = self.Param[('Cov', self.VarList[i])]
     self.Cov = BlockMatrix(cov)
     try:
         cov = CompyledFunc(var_names_and_syms={}, dict_or_expr=self.Cov)()
         sign, self.LogDetCov = slogdet(cov)
         self.LogDetCov *= sign
         self.InvCov = inv(cov)
     except:
         pass
     self.PreProcessed = True
Author: MBALearnsToCode, Project: ProbabPy, Lines of code: 26, Source file: __init__.py


Example 14: _loglike_mle

    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood
        """
        nobs = self.nobs
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1.0 / nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1 / 2.0 * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) - logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
Author: JerWatson, Project: statsmodels, Lines of code: 35, Source file: ar_model.py


Example 15: logjacobian

	def logjacobian(self, data):
		"""
		Returns the log-determinant of the Jacobian matrix evaluated at the given
		data points.

		@type  data: array_like
		@param data: data points stored in columns

		@rtype: ndarray
		@return: the logarithm of the Jacobian determinants
		"""

		# completed filter matrix
		W = inv(self.ica.A)

		# determinant of linear transformation
		logjacobian = zeros([1, data.shape[1]]) + slogdet(W)[1]

		# linearly transform data
		data = dot(W, data)

		length = len(str(len(self.ica.marginals)))

		if Transform.VERBOSITY > 0:
			print ('{0:>' + str(length) + '}/{1}').format(0, len(self.ica.marginals)),

		for i, mog in enumerate(self.ica.marginals):
			logjacobian += UnivariateGaussianization(mog).logjacobian(data[[i]])

			if Transform.VERBOSITY > 0:
				print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(self.ica.marginals)),
		if Transform.VERBOSITY > 0:
			print

		return logjacobian
Author: lucastheis, Project: isa, Lines of code: 35, Source file: marginalgaussianization.py


Example 16: __init__

 def __init__(self, d, nu, mu, Lambda):
     self.nu = nu
     self.d = d
     self.mu = mu
     self.precision = inv(Lambda)
     self.logdet = slogdet(Lambda)[1]
     self.Z = gammaln(nu / 2) + d / 2 * (math.log(nu) + math.log(math.pi)) - gammaln((nu + d) / 2)
Author: ClarkWang12, Project: igmm-ddcrp, Lines of code: 7, Source file: common.py


Example 17: addItem

    def addItem(self, i, newf, oldf, newt, oldt, followset, s, ss):    
             
        if newt == -1:
            newt = self.K
            self.K += 1
            self.cholesky[newt] = cholesky(self.con.lambda0 + self.con.kappa0_outermu0).T
            
        n = len(followset)
        self.counts[newt] += n
        assert self.counts[oldt] >= 0
        assert self.counts[newt] > 0
        self.s[newt] += s
        self.ss[newt] += ss
        #self.denom[newt] = self.integrateOverParameters(self.counts[newt], self.s[newt], self.ss[newt])

        n = self.counts[newt]
        kappan = self.con.kappa0 + n
        mun = (self.con.kappa0 * self.con.mu0 + self.s[newt]) / kappan
        lambdan = self.con.lambda0 + self.ss[newt] + self.con.kappa0_outermu0 - kappan * np.outer(mun, mun)
        self.logdet[newt] = slogdet(lambdan)[1]
        #for ind in followset:
        #    cholupdate(self.cholesky[newt], self.data[ind].copy())
            
        for j in followset:
            self.assignments[j] = newt
        self.follow[i] = newf
        self.sit_behind[newf].add(i)
Author: ClarkWang12, Project: igmm-ddcrp, Lines of code: 27, Source file: ddcrp.py


Example 18: lnprob_cov

    def lnprob_cov(C):

        # Get first term of loglikelihood expression (y * (1/C) * y.T)
        # Do computation using Cholesky decomposition
        try:
            
            U, luflag = cho_factor(C)
            
        except LinAlgError:

            # Matrix is not positive semi-definite, so replace it with the 
            #  positive semi-definite matrix that is nearest in the Frobenius norm

            E, EV = eigh(C)
            E[E<0] = 1e-12
            U, luflag = cho_factor(EV.dot(np.diag(E)).dot(EV.T))
            
        finally:

            x2 = cho_solve((U, luflag), dxy)
            L1 = dxy.dot(x2)

        # Get second term of loglikelihood expression (log det C)
        sign, L2 = slogdet(C)

        # Why am I always confused by this?
        thing_to_be_minimised = (L1 + L2)

        return thing_to_be_minimised
Author: evanbiederstedt, Project: pyBAST, Lines of code: 29, Source file: distortion.py


Example 19: nll

	def nll(self, log_th, x_nd, y_n, grad=False, use_self_hyper=True):
		"""
		Returns the negative log-likelihood : -log[p(y|x,th)],
		where, abc are the LOG -- hyper-parameters. 
			If adbc==None, then it uses the self.{a,d,b,c}
			to compute the value and the gradient.

		@params:
			x_nd    : input vectors in R^d
			y_n     : output at the input vectors
			log_th  : vector of hyperparameters
			grad    : if TRUE, this function also returns
				      the partial derivatives of nll w.r.t
				      each (log) hyper-parameter.
		"""
		## make the data-points a 2D matrix:
		if x_nd.ndim==1:
			x_nd = np.atleast_2d(x_nd).T
		if y_n.ndim==1:
			y_n = np.atleast_2d(y_n).T

		if not use_self_hyper:
			self.set_log_hyperparam(log_th)

		log_th = np.squeeze(log_th)

		N,d = x_nd.shape
		assert len(y_n)==N, "x and y shape mismatch."

		K,K1,K2 = self.get_covmat(x_nd, get_both=True)

		## compute the determinant using LU factorization:
		sign, log_det = nla.slogdet(K)
		#assert sign > 0, "Periodic Cov : covariance matrix is not PSD."
		
		## compute the inverse of the covariance matrix through gaussian
		## elimination:
		K_inv = nla.solve(K, np.eye(N))
		Ki_y  = K_inv.dot(y_n)

		## negative log-likelihood:
		nloglik = 0.5*( N*self.log_2pi + log_det + y_n.T.dot(Ki_y))

		if grad:
			num_hyper = self.n1 + self.n2
			K_diff = K_inv - Ki_y.dot(Ki_y.T)
			dfX = np.zeros(num_hyper)

			dK1 = self.k1.get_dK_dth(log_th[:self.n1], x_nd, y_n, use_self_hyper)
			dK2 = self.k2.get_dk_dth(log_th[self.n1:], x_nd, y_n, use_self_hyper)

			for i in xrange(self.n1):
				dfX[i] = np.sum(np.sum( K_diff * dK1[i].dot(K2)))
			for i in xrange(self.n2):
				dfX[i+self.n1] = np.sum(np.sum( K_diff * K1.dot(dK2[i])))

			return nloglik, dfX

		return nloglik
Author: ankush-me, Project: cdt_courses, Lines of code: 59, Source file: cov.py


Example 20: logmvstprob

def logmvstprob(x, mu, nu, d, Lambda):
    diff = x - mu[:,None]
    prob = gammaln((nu + d) / 2)
    prob -= gammaln(nu / 2)
    prob -= d / 2 * (math.log(nu) + math.log(math.pi))
    prob -= 0.5 * slogdet(Lambda)[1]
    prob -= (nu + d) / 2. * math.log(1 + 1. / nu * np.dot(np.dot(diff.T, inv(Lambda)), diff)[0][0])
    return prob
Author: ClarkWang12, Project: igmm-ddcrp, Lines of code: 8, Source file: common.py
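
This is the log density of a multivariate Student-t distribution with location mu, shape matrix Lambda and nu degrees of freedom, so it can be cross-checked against scipy.stats.multivariate_t (available in SciPy >= 1.6). An illustrative sketch, assuming logmvstprob and its imports are in scope and x is passed as a column vector as in the function above:

import numpy as np
from scipy.stats import multivariate_t

mu = np.zeros(2)
Lambda = np.array([[1.0, 0.2],
                   [0.2, 1.5]])
nu, d = 5.0, 2
x = np.array([[0.3], [-0.4]])    # column vector, as expected by logmvstprob

ours = logmvstprob(x, mu, nu, d, Lambda)
ref = multivariate_t(loc=mu, shape=Lambda, df=nu).logpdf(x.ravel())
print(np.isclose(ours, ref))     # expected: True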



Note: The numpy.linalg.slogdet examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.

