
Python linalg.solve_triangular Function Code Examples


This article collects typical usage examples of Python's scipy.linalg.solve_triangular function. If you are wondering what solve_triangular does, how to call it, or where to find concrete examples of it in use, the curated code samples below should help.



The following presents 20 code examples of the solve_triangular function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
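Before the collected examples, here is a minimal, self-contained sketch of what solve_triangular does (the matrix and right-hand side below are made up for illustration): it solves a triangular system L·x = b by forward substitution when lower=True (back substitution for an upper-triangular system), which costs O(n^2) instead of the O(n^3) of a general solve.

import numpy as np
from scipy import linalg

L = np.array([[2.0, 0.0, 0.0],
              [1.0, 3.0, 0.0],
              [4.0, 5.0, 6.0]])                # lower-triangular coefficient matrix
b = np.array([4.0, 11.0, 47.0])

x = linalg.solve_triangular(L, b, lower=True)  # forward substitution
assert np.allclose(L @ x, b)                   # x == [2., 3., 4.]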

Example 1: aN_grad

    def aN_grad(self,x,L,n,dataObj,gradient=True,onlyGradient=False,logproductExpectations=None):
        """
        Computes a_{n} and, optionally, its derivative. It evaluates only a_{n}
        when gradient and onlyGradient are both False; it evaluates a_{n} and
        computes its derivative when gradient is True and onlyGradient is False;
        and it computes only the gradient when gradient and onlyGradient are
        both True.
        
        Args:
            x: a_{n} is evaluated at x.
            L: Cholesky decomposition of the matrix A, where A is the covariance
               matrix of the past observations (x,w).
            n: Step of the algorithm.
            dataObj: Data object (it contains all the history).
            gradient: True if we want to compute the gradient; False otherwise.
            onlyGradient: True if we only want to compute the gradient; False otherwise.
            logproductExpectations: Vector with the logarithm of the product of the
                                    expectations of np.exp(-alpha2[j]*((z-W[i,j])**2))
                                    where W[i,:] is a point in the history.
                                    --Only with the SEK--
        """
        n1=self.n1
        n2=self.n2
        muStart=self._k[0].mu
        y2=dataObj.yHist[0:n+self._numberTraining]-muStart
        B=np.zeros(n+self._numberTraining)
        
        if logproductExpectations is None:
            for i in xrange(n+self._numberTraining):
                B[i]=self.B(x,dataObj.Xhist[i,:],self.n1,self.n2,self._k)
        else:
            for i in xrange(n+self._numberTraining):
                B[i]=self.B(x,dataObj.Xhist[i,:],self.n1,self.n2,self._k,logproductExpectations[i])
        
        inv1=linalg.solve_triangular(L,y2,lower=True)

        if onlyGradient:
            gradXB=self.gradXBforAn(x,n,B,self._k,
                                    dataObj.Xhist[0:n+self._numberTraining,0:n1],
                                    n1,self._numberTraining,
                                    dataObj.Xhist[0:n+self._numberTraining,n1:n1+n2],
                                    n2)
            temp4=linalg.solve_triangular(L,gradXB.transpose(),lower=True)
            gradAn=np.dot(inv1.transpose(),temp4)
            
    
            return gradAn

        inv2=linalg.solve_triangular(L,B.transpose(),lower=True)
        aN=muStart+np.dot(inv2.transpose(),inv1)
        if gradient==True:
            gradXB=self.gradXBforAn(x,n,B,self._k,
                                    dataObj.Xhist[0:n+self._numberTraining,0:n1],
                                    n1,self._numberTraining,
                                    dataObj.Xhist[0:n+self._numberTraining,n1:n1+n2],n2)
            temp4=linalg.solve_triangular(L,gradXB.transpose(),lower=True)
            gradAn=np.dot(inv1.transpose(),temp4)
            
            return aN,gradAn
        else:
            return aN
Author: toscanosaul, Project: SBO, Lines: 60, Source: stat.py


Example 2: predict

	def predict(self, Xq, predictUncertainty = True, predictObservations = False):

		# If the model has not learned, we should not predict anything
		self.requireLearned()

		# Set the learned result
		self._kernel.theta = self.kernelHyperparams.copy()

		# Compute the prediction using the standard Gaussian Process Prediction Algorithm
		alpha = la.solve_triangular(self.L.T, la.solve_triangular(self.L, self.y, lower = True, check_finite = False), check_finite = False)
		Kq = self._kernel(self.X, Xq)
		fqExp = np.dot(Kq.T, alpha) + self.yMean

		# Compute the covariance matrix using the standard Gaussian Process Prediction Algorithm if requested
		if predictUncertainty:

			v = la.solve_triangular(self.L, Kq, lower = True, check_finite = False)
			Kqq = self._kernel(Xq, Xq)
			fqVar = Kqq - np.dot(v.T, v)

		# Depending on if the user wants the one for latent function or output function, return the correct expected values and covariance matrix
		if predictUncertainty:
			if predictObservations:
				return(fqExp, fqVar + self.sigma**2 * np.eye(fqVar.shape[0]))
			else:
				return(fqExp, fqVar)
		else:
			return(fqExp)
Author: KelvyHsu, Project: ocean-exploration, Lines: 28, Source: GaussianProcessModel.py
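The nested pair of solve_triangular calls above is the standard way to apply the inverse of A = L·Lᵀ given its Cholesky factor: first solve L·z = y, then Lᵀ·alpha = z. A self-contained check of the pattern, with an arbitrary SPD matrix standing in for the kernel matrix of the example:

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(0)
B = rng.standard_normal((5, 5))
A = B @ B.T + 5 * np.eye(5)      # arbitrary symmetric positive definite matrix
y = rng.standard_normal(5)

L = np.linalg.cholesky(A)        # A == L @ L.T with L lower-triangular
alpha = la.solve_triangular(L.T, la.solve_triangular(L, y, lower=True))
assert np.allclose(alpha, np.linalg.solve(A, y))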


Example 3: fit

    def fit(self, theta_start=[.1, .5, 0.], method='SLSQP', numba=True):
        """Fit DECO model to the data.

        """
        self.numba = numba
        self.param = ParamDCC(ndim=self.data.ndim)
        self.standardize_returns()
        self.param.corr_target = np.atleast_2d(np.corrcoef(self.data.std_ret.T))
        neg_ret = self.data.std_ret.T.copy()
        neg_ret[neg_ret > 0] = 0
        self.param.corr_neg_target = np.atleast_2d(np.corrcoef(neg_ret))

        # Compute lmbd to check for stationarity of Q
        factor, lower = scl.cho_factor(self.param.corr_target, lower=True)
        sandwich = scl.solve_triangular(factor, self.param.corr_neg_target,
                                        lower=lower)
        sandwich = scl.solve_triangular(factor, sandwich.T,
                                        lower=lower)
        self.lmbd = scl.eigvals(sandwich).real.max()

        options = {'disp': False, 'maxiter': int(1e6)}

        opt_out = sco.minimize(self.likelihood, theta_start,
                               method=method, options=options)

        self.param.abcorr = opt_out.x
        self.data.rho_series = pd.Series(self.data.rho_series,
                                         index=self.data.ret.index)
        self.estimate_innov()
        return opt_out
Author: khrapovs, Project: multivol, Lines: 30, Source: dcc.py
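A note on the stationarity check in fit: the two back-to-back solve_triangular calls form L⁻¹·corr_neg_target·L⁻ᵀ, which is similar to corr_target⁻¹·corr_neg_target and therefore has the same eigenvalues. A hedged sketch of that equivalence on stand-in matrices:

import numpy as np
import scipy.linalg as scl

rng = np.random.default_rng(1)
C = rng.standard_normal((4, 4))
corr = C @ C.T + 4 * np.eye(4)           # stand-in for param.corr_target
R = rng.standard_normal((4, 4))
corr_neg = (R + R.T) / 2                 # stand-in for param.corr_neg_target (symmetric)

factor, lower = scl.cho_factor(corr, lower=True)
sandwich = scl.solve_triangular(factor, corr_neg, lower=lower)
sandwich = scl.solve_triangular(factor, sandwich.T, lower=lower)

lmbd = scl.eigvals(sandwich).real.max()
direct = scl.eigvals(np.linalg.solve(corr, corr_neg)).real.max()
assert np.isclose(lmbd, direct)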


Example 4: negLogEvidence

		def negLogEvidence(theta, grad):

			if _optimiseLengthHyperparams:
				self.lengthScale = theta[:-2]
				self.gpLengthScale.setLearningResult(theta[-2:], 0)
			else:
				self.lengthScale = theta.copy()

			self.updateLengthScaleModel()

			self.S = self.kernel(self.X, self.X) + self.sigma**2 * self.I
			self.L = choleskyjitter(self.S)

			alpha = la.solve_triangular(self.L.T, la.solve_triangular(self.L, self.y, lower = True, check_finite = False), check_finite = False)
			negLogEvidenceValue = 0.5 * self.y.dot(alpha) + np.sum(np.log(self.L.diagonal()))
			
			# VERBOSE
			if VERBOSE:
				negLogEvidence.counter += 1

				if negLogEvidence.lastPrintedSec != time.gmtime().tm_sec:
					negLogEvidence.lastPrintedSec = time.gmtime().tm_sec
					if _optimiseLengthHyperparams:
						print(negLogEvidence.counter, '\t', self.lengthScale, self.stationaryLengthScale, self.gpLengthScale.kernelHyperparams, negLogEvidenceValue)
					else:
						print(negLogEvidence.counter, '\t', self.lengthScale, self.stationaryLengthScale, negLogEvidenceValue)

			return(negLogEvidenceValue)
Author: KelvyHsu, Project: ocean-exploration, Lines: 28, Source: GaussianProcessModel.py
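The objective above is the Gaussian-process negative log marginal likelihood up to the constant (n/2)·log(2π): ½·yᵀS⁻¹y + ½·log|S|, where log|S| is read cheaply off the Cholesky diagonal since log|S| = 2·Σᵢ log Lᵢᵢ. A standalone check on a made-up covariance:

import numpy as np
from scipy import linalg as la
from scipy.stats import multivariate_normal

rng = np.random.default_rng(2)
B = rng.standard_normal((6, 6))
S = B @ B.T + 6 * np.eye(6)      # stand-in for kernel(X, X) + sigma**2 * I
y = rng.standard_normal(6)

L = np.linalg.cholesky(S)
alpha = la.solve_triangular(L.T, la.solve_triangular(L, y, lower=True))
neg_log_evidence = 0.5 * y.dot(alpha) + np.sum(np.log(L.diagonal()))

assert np.isclose(neg_log_evidence + 0.5 * len(y) * np.log(2 * np.pi),
                  -multivariate_normal(cov=S).logpdf(y))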


Example 5: varN

    def varN(self,x,n,grad=False):
        temp=self._k.K(np.array(x).reshape((1,self.n1)))
        tempN=self._numberTraining+n
        sigmaVec=np.zeros((tempN,1))
        for i in xrange(tempN):
            sigmaVec[i,0]=self._k.K(np.array(x).reshape((1,self.n1)),self._Xhist[i:i+1,:])[:,0]
        A=self._k.A(self._Xhist[0:tempN,:],noise=self._noiseHist[0:tempN])
        L=np.linalg.cholesky(A)
        temp3=linalg.solve_triangular(L,sigmaVec,lower=True)
        temp2=np.dot(temp3.T,temp3)
        temp2=temp-temp2
        if grad==False:
            return temp2
        else:
            gradi=np.zeros(self.n1)
            x=np.array(x).reshape((1,self.n1))

            gradX=self.gradXKern(x,n,self)
            #gradX=np.zeros((n,self._n1))
            for j in xrange(self.n1):
              #  for i in xrange(n):
                  #  gradX[i,j]=self._k.K(x,self._X[i,:].reshape((1,self._n1)))*(2.0*self._alpha1[j]*(x[0,j]-self._X[i,j]))
                temp5=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
                gradi[j]=np.dot(temp5.T,temp3)
            gradVar=-2.0*gradi
            return temp2,gradVar
Author: toscanosaul, Project: SBO, Lines: 26, Source: statGeneral.py


Example 6: variance

def variance(memory, predictor, processes = 1):
    """
    Computes predictive variance of latent function
    Arguments:
        memory(*)   :   Memory object learned from classifier learning
        predictor(*):   Predictor object cached with predictive covariance
    Keyword Arguments:
        processes   :   Number of cores to use for parallelising computations
    Returns:
        fq_var(*)   :   Predictive variance at query points
    (*) Accepts homogeneous lists of the described quantity
    """
    if isinstance(memory, list):
        memories_predictors = [(memory[i], predictor[i]) 
            for i in range(len(memory))]
        return parallel_starmap(variance, memories_predictors, 
            processes = processes)

    kernel = compose(memory.kerneldef)
    if memory.approxmethod == 'laplace':
        v = la.solve_triangular(memory.cache.get('L'), 
            (memory.cache.get('wsqrt') * predictor.Kq.T).T, 
            lower = True, check_finite = False)
    elif memory.approxmethod == 'pls':
        v = la.solve_triangular(memory.cache.get('L'), predictor.Kq,
            lower = True, check_finite = False)
    kqq = kernel(predictor.Xq, None, memory.hyperparams)
    fq_var = kqq - np.sum(v**2, axis = 0)
    return fq_var
Author: KelvyHsu, Project: ocean-exploration, Lines: 29, Source: cpredict.py
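Both branches rely on the same identity: with S = L·Lᵀ and v = L⁻¹·Kq, the predictive variance k_qq − Kqᵀ·S⁻¹·Kq equals k_qq − Σᵢ vᵢ², so one triangular solve plus a column-wise sum of squares replaces a full solve. A standalone check with made-up matrices:

import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(3)
B = rng.standard_normal((6, 6))
S = B @ B.T + 6 * np.eye(6)               # stand-in training covariance
Kq = rng.standard_normal((6, 3))          # stand-in train/query cross-covariance
kqq = rng.standard_normal(3) ** 2 + 10.0  # stand-in query prior variances

L = np.linalg.cholesky(S)
v = la.solve_triangular(L, Kq, lower=True)
fq_var = kqq - np.sum(v**2, axis=0)

direct = kqq - np.einsum('ij,ij->j', Kq, np.linalg.solve(S, Kq))
assert np.allclose(fq_var, direct)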


Example 7: chol_up_insert

def chol_up_insert(L, V12, V23, V22, Snn_noise_std_vec,insertionID):

    R = L.T
    N = R.shape[0]
    n = V22.shape[0]
    noise = np.diag(Snn_noise_std_vec ** 2)
    R11 = R[:insertionID, :insertionID]
    R33 = R[insertionID:, insertionID:]
    S11 = R11
    S12 = la.solve_triangular(R11.T, V12, lower=True)
    S13 = R[:insertionID,insertionID:]
    S22 = linalg.jitchol(V22+noise - S12.T.dot(S12)).T
    if V23.shape[1] != 0: # The data is being inserted between columns
        S23 = la.solve_triangular(S22.T,(V23-S12.T.dot(S13)), lower=True)
        S33 = linalg.jitchol(R33.T.dot(R33)-S23.T.dot(S23)).T
    else:  # the data is being appended at the end of the matrix
        S23 = np.zeros((n,0))
        S33 = np.zeros((0,0))
    On1 = np.zeros((n, insertionID))
    On2 = np.zeros((N-insertionID, insertionID))
    On3 = np.zeros((N-insertionID,n))

    top = np.concatenate((S11, S12, S13), axis=1)
    middle = np.concatenate((On1, S22, S23), axis=1)
    bottom = np.concatenate((On2, On3, S33), axis=1)
    return np.concatenate((top, middle, bottom), axis=0).T
Author: KelvyHsu, Project: ocean-exploration, Lines: 26, Source: train.py


Example 8: expec_e

 def expec_e(self):
     """
     Noise of each observation
     """
     innovation = self.x - (self.y * self.a + self.c) # mu0_e == 0
     
     for f in range(self.F):
         inn_f = innovation[:, f][:, np.newaxis]
         
         # UPDATE e -----------------------------
         self.cov_e[f] = np.zeros((self.N, self.N))
         for p in range(self.P):
             pidxs = self.periods==p
             inn_fp = inn_f[pidxs]
                              
             K = self.K_target[pidxs][:, pidxs] * self.Lambda_e[f] / self.b[p, f] + 1e-6 * np.eye(self.T)
             
             a_diag = np.diag(self.a[pidxs, f])
             y_diag = np.diag(self.y[pidxs, 0])
             
             var_c = np.diag(np.diag(self.cov_c[f][pidxs][:, pidxs]))
             var_a = np.diag(np.diag(self.cov_a[f][pidxs][:, pidxs]))
             var_y = np.diag(np.diag(self.cov_y[pidxs][:, pidxs]))
             
             S_e = K + var_c + y_diag.dot(var_a).dot(y_diag.T) + a_diag.dot(var_y).dot(a_diag.T)
             Le = cholesky(S_e, lower=True, overwrite_a=True, check_finite=False)
             B = solve_triangular(Le, inn_fp, lower=True, overwrite_b=True, check_finite=False)
             A = solve_triangular(Le.T, B, overwrite_b=True, check_finite=False)           
             V = solve_triangular(Le, K, check_finite=False, lower=True)   
             
             self.e[pidxs, f] = K.dot(A).reshape(-1)
             self.cov_e[f][np.ix_(pidxs, pidxs)] = K - V.T.dot(V) 
Author: edwinrobots, Project: forecastcombination, Lines: 32, Source: bayesian_forecast_combination.py
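Examples 8 through 10 repeat one template from GP regression: with S = K + noise terms and L = cholesky(S, lower=True), the coefficient vector A = S⁻¹·innovation comes from a forward then a backward triangular solve, the posterior mean is K·A, and V = L⁻¹·K gives the posterior covariance K − Vᵀ·V. A compact check of the covariance identity on stand-in matrices:

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(4)
B = rng.standard_normal((5, 5))
K = B @ B.T                  # stand-in prior covariance
S = K + np.eye(5)            # prior plus unit observation noise

L = cholesky(S, lower=True)
V = solve_triangular(L, K, lower=True)
post_cov = K - V.T @ V       # equals K - K S^{-1} K

assert np.allclose(post_cov, K - K @ np.linalg.solve(S, K))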


Example 9: expec_Lambda_b

 def expec_Lambda_b(self):
     """
     Parameters of the noise in general -- captures the increase in noise over time, and its relationship with y.
     """              
     for f in range(self.F):                         
         # UPDATE Lambda ------------------------       
         shape_Lambda = self.T + 1 + self.shape0_Lambda + self.P
         scale_Lambda = self.scale0_Lambda * self.K_time[0:self.T, :][:, 0:self.T]
         for p in range(self.P):
             pidxs = self.periods==p
             inn_f = self.e[pidxs, f:f+1] # deviations from mean of 0
             inn_fp = inn_f.dot(inn_f.T) + self.cov_e[f][pidxs][:, pidxs]
             scale_Lambda += inn_fp / self.K_target[pidxs][:, pidxs] * self.b[p, f]
         self.Lambda_e[f] = scale_Lambda / (shape_Lambda - self.T - 1)# P x P                            
                         
         # UPDATE b --------------------------- Check against bird paper.            
         shape_b = self.lambda_e[f] + self.T/2.0
         expec_log_b = np.zeros(self.P)
         for p in range(self.P):
             pidxs = self.periods==p
             inn_f = self.e[pidxs, f]
             var_e = np.diag(np.diag(self.cov_e[f][pidxs][:, pidxs]))
             inn_fp = inn_f.dot(inn_f) + var_e            
             
             L_Lambda = cholesky(self.Lambda_e[f] * self.K_target[pidxs][:, pidxs] + 1e-6  * np.eye(self.T), lower=True, check_finite=False)
             B = solve_triangular(L_Lambda, inn_fp, overwrite_b=True, check_finite=False, lower=True)
             A = solve_triangular(L_Lambda.T, B, overwrite_b=True, check_finite=False)
             rate_b = self.lambda_e[f] + np.trace(A)/2.0
             self.b[p, f] = shape_b / rate_b
             expec_log_b[p] = psi(shape_b) - np.log(rate_b)
                    
         # UPDATE lambda -----------------------
         shape_lambda = self.shape0_lambda + 0.5 * self.N
         scale_Lambda = self.scale0_Lambda - 0.5 * np.sum(1 + expec_log_b - self.b[:, f])
         self.lambda_e[f] = shape_lambda / scale_Lambda            
Author: edwinrobots, Project: forecastcombination, Lines: 35, Source: bayesian_forecast_combination.py


Example 10: expec_a

    def expec_a(self):
        for f in range(self.F):
            innovation = self.x[:, f:f+1] - (self.y * self.mu0_a + self.c[:, f:f+1] + self.e[:, f:f+1]) # observation minus prior over forecasters' predictions 
            K = self.s_a[f] * self.K       
            
            y_diag = np.diag(self.y.reshape(-1))
            
            var_y = np.diag(np.diag(self.cov_y))
            var_c = np.diag(np.diag(self.cov_c[f]))
            var_e = np.diag(np.diag(self.cov_e[f]))
            
            S_a = y_diag.dot(K).dot(y_diag.T) + self.mu0_a**2 * var_y + var_c + var_e
            La = cholesky(S_a, lower=True, overwrite_a=True, check_finite=False)
            B = solve_triangular(La, innovation, lower=True, overwrite_b=True, check_finite=False)
            A = solve_triangular(La.T, B, overwrite_b=True, check_finite=False)           
            V = solve_triangular(La, y_diag.dot(K), check_finite=False, lower=True)
            self.a[:, f] = self.mu0_a + K.dot(y_diag).dot(A).reshape(-1)
            self.cov_a[f] = K - V.T.dot(V)

            rate0_s = self.s0_a
            shape_s = 1 + 0.5 * self.N 
            af = self.a[:, f][:, np.newaxis]
            
            var_a = np.diag(np.diag(self.cov_a[f]))
            B = solve_triangular(self.L_K, af.dot(af.T).T + y_diag.dot(var_a).dot(y_diag.T).T, lower=True, overwrite_b=True)
            A = solve_triangular(self.L_K.T, B, overwrite_b=True)            
            
            rate_s = rate0_s + 0.5 * np.trace(A)
Author: edwinrobots, Project: forecastcombination, Lines: 28, Source: bayesian_forecast_combination.py


Example 11: muN

    def muN(self,x,n,L,X,temp1,kern,grad=False,onlyGrad=False):
        muStart=kern.mu
        x=np.array(x).reshape((1,self.n1))
        tempN=self._numberTraining+n
        B=np.zeros([1,tempN])
        
        for i in xrange(tempN):
            B[:,i]=self._k.K(x,X[i:i+1,:])

        temp2=linalg.solve_triangular(L,B.T,lower=True)
        
        if grad:
            gradX=self.gradXKern(x,n,self._k,self._numberTraining,X,self.n1)
            gradi=np.zeros(self.n1)
            
            for j in xrange(self.n1):
                temp5=linalg.solve_triangular(L,gradX[:,j].T,lower=True)
                gradi[j]=np.dot(temp5,temp1)
            
        
        if onlyGrad:
            return gradi
        
        a=muStart+np.dot(temp2.T,temp1)
        if grad==False:
            return a

        return a,gradi
Author: toscanosaul, Project: BGO, Lines: 28, Source: stat.py


Example 12: _batch_omp_step

def _batch_omp_step(G, alpha_0, m, eps_0=None, eps=None):
    idx = []
    L = np.ones((1,1))
    alpha = alpha_0
    eps_curr = eps_0
    delta = 0
    it = 0
    if eps == None:
        stopping_condition = lambda: it == m
    else:
        stopping_condition = lambda: eps_curr <=eps

    while not stopping_condition():
        lam = np.abs(alpha).argmax()
        if len(idx) > 0:
            w = linalg.solve_triangular(L, G[idx, lam],
                                        lower = True, unit_diagonal=True)
            L = np.r_[np.c_[L, np.zeros(len(L))],
                      np.atleast_2d(np.append(w, np.sqrt(1-np.inner(w,w))))]
        idx.append(lam)
        it +=1
        Ltc = linalg.solve_triangular(L, alpha_0[idx], lower=True)
        gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
        beta = np.dot(G[:, idx], gamma)
        alpha = alpha_0 - beta
        if eps != None:
            eps_curr += delta
            delta = np.inner(gamma, beta[idx])
            eps_curr -= delta
    return gamma, idx
Author: LeonGuo1988, Project: LSpDl, Lines: 30, Source: leonomp2.py
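The incremental step above extends the Cholesky factor of the selected Gram submatrix by one row per new atom: w = L⁻¹·G[idx, λ] and new diagonal entry √(1 − wᵀw), which is valid when the dictionary columns are unit-norm so that G[λ, λ] = 1. A sketch of that identity on a random Gram matrix (using the plain update, without the unit_diagonal flag):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(5)
D = rng.standard_normal((50, 10))
D /= np.linalg.norm(D, axis=0)           # unit-norm atoms, so diag(G) == 1
G = D.T @ D

idx = [3, 7]                             # atoms selected so far
L = np.linalg.cholesky(G[np.ix_(idx, idx)])

lam = 5                                  # newly selected atom
w = linalg.solve_triangular(L, G[idx, lam], lower=True)
L = np.block([[L, np.zeros((len(idx), 1))],
              [w[None, :], np.array([[np.sqrt(1.0 - w @ w)]])]])
idx.append(lam)

assert np.allclose(L, np.linalg.cholesky(G[np.ix_(idx, idx)]))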


Example 13: power_iteration

 def power_iteration(self,x0,tol=1.e-10,max_it=1000, verbose = False):
     # P, L, U = sp.linalg.lu(A)
     L, U = self.lunopiv(self.matrices.L,1.e-6)
     #
     error = 1.
     it = 0
     #
     phi_new = x0
     lambda_new = np.linalg.norm(self.matrices.M.dot(phi_new))
     #
     while (error > tol) and (it < max_it):
         # setting the variable to the old name
         it = it+1
         phi_old = phi_new
         lambda_old = lambda_new
         #
         #phi_new = U\(L\(B*phi_old))
         tmp = self.matrices.M.dot(phi_old)
         sp_la.solve_triangular(L, tmp, lower=True,overwrite_b=True)
         sp_la.solve_triangular(U, tmp, lower=False,overwrite_b=True)
         phi_new = tmp
         #lambda_new = (phi_new*tmp)/(phi_old*tmp)
         #
         lambda_new = np.linalg.norm(phi_new)
         phi_new = phi_new/lambda_new
         #
         phi_error = np.linalg.norm(phi_new-phi_old)
         lambda_error = abs(lambda_new - lambda_old)/lambda_new
         error = max(phi_error,lambda_error)
         if (it%5 == 0 and verbose):
             print "iter = ", it, ";  lambda = ",lambda_new, "; error = ", error
     print "iter = ", it, ";  lambda = ",lambda_new, "; error = ", error
     self.state.keff = lambda_new
     self.state.phi = phi_new
Author: segonpin, Project: CMFD_cpp_python, Lines: 34, Source: Solver.py
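power_iteration applies the inverse of the factored operator with two in-place triangular solves per step (overwrite_b=True avoids allocating a new vector each iteration). A hedged sketch of the same inverse power iteration using scipy's lu_factor/lu_solve, with made-up SPD operators standing in for self.matrices.L and self.matrices.M:

import numpy as np
from scipy.linalg import lu_factor, lu_solve

rng = np.random.default_rng(6)
B = rng.standard_normal((6, 6))
A = B @ B.T + 6 * np.eye(6)      # stand-in "loss" operator
C = rng.standard_normal((6, 6))
M = C @ C.T                      # stand-in "production" operator

lu, piv = lu_factor(A)           # factor once, reuse every iteration
phi = np.ones(6)
lam = 1.0
for it in range(1000):
    phi_new = lu_solve((lu, piv), M @ phi)       # phi_new = A^{-1} M phi
    lam_new = np.linalg.norm(phi_new)
    phi_new /= lam_new
    error = max(np.linalg.norm(phi_new - phi),
                abs(lam_new - lam) / lam_new)
    phi, lam = phi_new, lam_new
    if error < 1e-10:
        break
# lam now approximates the dominant eigenvalue of A^{-1} M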


Example 14: _posterior_dist

 def _posterior_dist(self,A,beta,XX,XY,full_covar=False):
     '''
     Calculates mean and covariance matrix of posterior distribution
     of coefficients.
     '''
     # compute precision matrix for active features
     Sinv = beta * XX
     np.fill_diagonal(Sinv, np.diag(Sinv) + A)
     cholesky = True
     # try cholesky, if it fails go back to pinvh
     try:
         # find posterior mean : R*R.T*mean = beta*X.T*Y
         # solve(R*z = beta*X.T*Y) => find z => solve(R.T*mean = z) => find mean
         R    = np.linalg.cholesky(Sinv)
         Z    = solve_triangular(R,beta*XY, check_finite=False, lower = True)
         Mn   = solve_triangular(R.T,Z, check_finite=False, lower = False)
         
         # invert lower triangular matrix from cholesky decomposition
         Ri   = solve_triangular(R,np.eye(A.shape[0]), check_finite=False, lower=True)
         if full_covar:
             Sn   = np.dot(Ri.T,Ri)
             return Mn,Sn,cholesky
         else:
             return Mn,Ri,cholesky
     except LinAlgError:
         cholesky = False
         Sn   = pinvh(Sinv)
         Mn   = beta*np.dot(Sinv,XY)
         return Mn, Sn, cholesky
Author: AmazaspShumik, Project: sklearn-bayes, Lines: 29, Source: fast_rvm.py
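The Ri trick at the end is worth noting: solving R·Z = I gives Ri = R⁻¹ in one triangular solve, and since Sinv = R·Rᵀ, the posterior covariance Sn = Sinv⁻¹ factors as Riᵀ·Ri, so the full inverse never needs to be formed unless full_covar is requested. A standalone check on a stand-in precision matrix:

import numpy as np
from scipy.linalg import solve_triangular

rng = np.random.default_rng(7)
B = rng.standard_normal((5, 5))
Sinv = B @ B.T + 5 * np.eye(5)   # stand-in precision matrix

R = np.linalg.cholesky(Sinv)     # Sinv == R @ R.T
Ri = solve_triangular(R, np.eye(5), check_finite=False, lower=True)
assert np.allclose(Ri.T @ Ri, np.linalg.inv(Sinv))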


Example 15: getParametersOptVoi

    def getParametersOptVoi(self,i):
        tempN=self.numberTraining+i
        args={}
        args['i']=i
        A=self.stat._k.A(self.dataObj.Xhist[0:tempN,:],noise=self.dataObj.varHist[0:tempN])
        L=np.linalg.cholesky(A)
        args['L']=L

        muStart=self.stat._k.mu
        y=self.dataObj.yHist
        temp1=linalg.solve_triangular(L,np.array(y)-muStart,lower=True)
        args['temp1']=temp1

        m=self._VOI._points.shape[0]
        temp2=np.zeros((m,tempN))

        X=self.dataObj.Xhist
        B=np.zeros((m,tempN))
        for i in xrange(tempN):
            B[:,i]=self.stat._k.K(self._VOI._points,X[i:i+1,:])[:,0]

        a=np.zeros(m)
        for i in xrange(m):
            temp2[i,:]=linalg.solve_triangular(L,B[i,:].T,lower=True)
            a[i]=muStart+np.dot(temp2[i,:],temp1)

        args['temp2']=temp2

        args['a']=a
        return args
Author: toscanosaul, Project: SBO, Lines: 30, Source: KG.py


Example 16: solve

 def solve(a, y, negative=True):
     """
     A wrapper for solving the linear system 'ax = y' using:
         1) np.linalg.solve (if scipy is not available)
         2) Cholesky decomposition (if scipy is available and a is not sparse)
         3) spsolve (if scipy is available and a is sparse)
     If scipy is available and a is dense but not positive definite, a LinAlgError will be thrown
     :param a        : 'a'
     :param y        : 'y'
     :param negative : If True, we'll solve '-ax = y' instead
     :return         : 'x'
     """
     if scipy_flag:
         if issparse(a):
             if negative:
                 return sparse_lin.spsolve(-a, y)
             return sparse_lin.spsolve(a, y)
         l = linalg.cholesky(a, lower=True)
         if negative:
             z = linalg.solve_triangular(-l, y, lower=True)
         else:
             z = linalg.solve_triangular(l, y, lower=True)
         return linalg.solve_triangular(l.T, z)
     if negative:
         return np.linalg.solve(-a, y)
     return np.linalg.solve(a, y)
Author: bitores, Project: MachineLearning, Lines: 26, Source: Methods.py


Example 17: cholesky_solver_least_squares

def cholesky_solver_least_squares(part_one, part_two):
    '''
    Solves a least-squares problem using Cholesky decomposition
    
    Parameters:
    -----------
    
    part_one: numpy array of size 'm x m', 
              Equals X.T * X
    part_two: numpy array of size 'm x 1'
              Equals X.T * Y
              
    Returns:
    --------
    Theta: numpy array of size 'm x 1'
              Vector of coefficients
    
    '''
    # R*R.T*Theta = part_two
    R = np.linalg.cholesky(part_one)
    # R*Z = part_two
    Z     = solve_triangular(R,part_two, check_finite = False, lower = True)
    # R.T*Theta = Z
    Theta = solve_triangular(R.T,Z, check_finite = False, lower = False)
    return Theta
Author: AmazaspShumik, Project: Mixture-Models, Lines: 25, Source: weighted_lin_reg.py
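A quick usage check of cholesky_solver_least_squares against numpy's QR-based least-squares solver, assuming the function above is in scope; the design matrix X and target Y are made up:

import numpy as np

rng = np.random.default_rng(8)
X = rng.standard_normal((30, 4))          # tall, full-rank design matrix
Y = rng.standard_normal(30)

theta = cholesky_solver_least_squares(X.T @ X, X.T @ Y)
assert np.allclose(theta, np.linalg.lstsq(X, Y, rcond=None)[0])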


Example 18: _update

    def _update(self):
        sn2 = self._likelihood.s2
        su2 = sn2 / 1e6

        # kernel wrt the inducing points.
        Kuu = self._kernel.get(self._U)
        p = self._U.shape[0]

        # cholesky for the information gain. note that we only need to compute
        # this once as it is independent from the data.
        self._L = sla.cholesky(Kuu + su2 * np.eye(p))

        # evaluate the kernel and residuals at the new points
        Kux = self._kernel.get(self._U, self._X)
        kxx = self._kernel.dget(self._X)
        r = self._y - self._mean

        # the cholesky of Q.
        V = sla.solve_triangular(self._L, Kux, trans=True)

        # rescale everything by the diagonal matrix ell.
        ell = np.sqrt(kxx + sn2 - np.sum(V**2, axis=0))
        Kux /= ell
        V /= ell
        r /= ell

        # NOTE: to update things incrementally all we need to do is store these
        # components. A just needs to be initialized at the identity and then
        # we just accumulate here.
        self._A = np.eye(p) + np.dot(V, V.T)
        self._a = np.dot(Kux, r)

        # update the posterior.
        self._R = np.dot(sla.cholesky(self._A), self._L)
        self._b = sla.solve_triangular(self._R, self._a, trans=True)
Author: audurand, Project: pygp, Lines: 35, Source: fitc.py


Example 19: cholesky_omp

def cholesky_omp(D, x, m, eps=None):
    if eps == None:
        stopping_condition = lambda: it == m  # len(idx) == m
    else:
        stopping_condition = lambda: np.inner(residual, residual) <= eps

    alpha = np.dot(x, D)

    # first step:
    it = 1
    lam = np.abs(np.dot(x, D)).argmax()
    idx = [lam]
    L = np.ones((1, 1))
    gamma = linalg.lstsq(D[:, idx], x)[0]
    residual = x - np.dot(D[:, idx], gamma)

    while not stopping_condition():
        lam = np.abs(np.dot(residual, D)).argmax()
        w = linalg.solve_triangular(L, np.dot(D[:, idx].T, D[:, lam]), lower=True, unit_diagonal=True)
        # should the diagonal be unit in theory? It crashes without it
        L = np.r_[np.c_[L, np.zeros(len(L))], np.atleast_2d(np.append(w, np.sqrt(1 - np.dot(w.T, w))))]
        idx.append(lam)
        it += 1
        # gamma = linalg.solve(np.dot(L, L.T), alpha[idx], sym_pos=True)
        # what am I, stupid??
        Ltc = linalg.solve_triangular(L, alpha[idx], lower=True)
        gamma = linalg.solve_triangular(L, Ltc, trans=1, lower=True)
        residual = x - np.dot(D[:, idx], gamma)

    return gamma, idx
Author: alexeyche, Project: alexeyche-junk, Lines: 30, Source: omp2.py


Example 20: log_gaussian_pdf

def log_gaussian_pdf(x, mu=None, Sigma=None, is_cholesky=False, compute_grad=False):
    if mu is None:
        mu = np.zeros(len(x))
    if Sigma is None:
        Sigma = np.eye(len(mu))
    
    if is_cholesky is False:
        L = np.linalg.cholesky(Sigma)
    else:
        L = Sigma
    
    assert len(x) == Sigma.shape[0]
    assert len(x) == Sigma.shape[1]
    assert len(x) == len(mu)
    
    # solve y=K^(-1)x = L^(-T)L^(-1)x
    x = np.array(x - mu)
    y = solve_triangular(L, x.T, lower=True)
    y = solve_triangular(L.T, y, lower=False)
    
    if not compute_grad:
        log_determinant_part = -np.sum(np.log(np.diag(L)))
        quadratic_part = -0.5 * x.dot(y)
        const_part = -0.5 * len(L) * np.log(2 * np.pi)
        
        return const_part + log_determinant_part + quadratic_part
    else:
        return -y
Author: littlefoxhome, Project: kernel_hmc, Lines: 28, Source: gaussian.py
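A sanity check of log_gaussian_pdf against scipy.stats.multivariate_normal on a made-up covariance, assuming the function above (and its solve_triangular import) is in scope:

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(9)
B = rng.standard_normal((4, 4))
Sigma = B @ B.T + 4 * np.eye(4)
mu = rng.standard_normal(4)
x = rng.standard_normal(4)

assert np.isclose(log_gaussian_pdf(x, mu, Sigma),
                  multivariate_normal(mean=mu, cov=Sigma).logpdf(x))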



Note: The scipy.linalg.solve_triangular examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from projects contributed by various developers; copyright remains with the original authors, so consult each project's license before redistributing or using the code. Do not reproduce without permission.

