This article collects typical usage examples of the Python function scipy.linalg.cho_solve. If you are wondering what cho_solve does and how it is used in practice, the curated examples below may help.
The following presents 20 code examples of cho_solve, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
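As a quick orientation before the examples: the canonical pattern is to factor a symmetric positive-definite matrix once with scipy.linalg.cho_factor and then reuse that factorization for one or more right-hand sides with scipy.linalg.cho_solve. A minimal sketch (the matrix and right-hand side are invented for illustration):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 5))
A = X @ X.T + 5 * np.eye(5)   # symmetric positive definite by construction
b = rng.standard_normal(5)

c, lower = cho_factor(A)      # Cholesky factorization of A
x = cho_solve((c, lower), b)  # solves A x = b using the cached factor
assert np.allclose(A @ x, b)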
Example 1: cho_solve_ATAI
def cho_solve_ATAI(A, rho, b, c, lwr, check_finite=True):
    r"""
    Solve the linear system :math:`(A^T A + \rho I)\mathbf{x} = \mathbf{b}`
    or :math:`(A^T A + \rho I)X = B` using :func:`scipy.linalg.cho_solve`.

    Parameters
    ----------
    A : array_like
        Matrix :math:`A`
    rho : float
        Scalar :math:`\rho`
    b : array_like
        Vector :math:`\mathbf{b}` or matrix :math:`B`
    c : array_like
        Matrix containing lower or upper triangular Cholesky factor,
        as returned by :func:`scipy.linalg.cho_factor`
    lwr : bool
        Flag indicating whether the factor is lower or upper triangular

    Returns
    -------
    x : ndarray
        Solution to the linear system
    """
    N, M = A.shape
    if N >= M:
        x = linalg.cho_solve((c, lwr), b, check_finite=check_finite)
    else:
        x = (b - A.T.dot(linalg.cho_solve((c, lwr), A.dot(b),
                                          check_finite=check_finite))) / rho
    return x
Developer: bwohlberg, project: sporco, lines of code: 32, source file: linalg.py
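A hedged usage sketch for the N < M branch (data invented; reading the code, in this case `c` must factor the smaller N x N system A A^T + rho*I, which the function exploits via the matrix inversion lemma; `linalg` is assumed to be scipy.linalg, as in sporco's module):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
rho = 0.5
A = rng.standard_normal((4, 10))   # N < M: wide matrix
b = rng.standard_normal(10)

c, lwr = linalg.cho_factor(A @ A.T + rho * np.eye(4))  # factor the small system
x = cho_solve_ATAI(A, rho, b, c, lwr)
assert np.allclose((A.T @ A + rho * np.eye(10)) @ x, b)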
Example 2: LMLdebug
def LMLdebug(self):
    """
    LML function for debug
    """
    assert self.N*self.P < 5000, 'gp2kronSum:: N*P>=5000'

    y = SP.reshape(self.Y, (self.N*self.P), order='F')
    V = SP.kron(SP.eye(self.P), self.F)

    XX = SP.dot(self.Xr, self.Xr.T)
    K = SP.kron(self.Cr.K(), XX)
    K += SP.kron(self.Cn.K() + self.offset*SP.eye(self.P), SP.eye(self.N))

    # inverse of K
    cholK = LA.cholesky(K)
    Ki = LA.cho_solve((cholK, False), SP.eye(self.N*self.P))

    # Areml and inverse
    Areml = SP.dot(V.T, SP.dot(Ki, V))
    cholAreml = LA.cholesky(Areml)
    Areml_i = LA.cho_solve((cholAreml, False), SP.eye(self.K*self.P))

    # effect sizes and z
    b = SP.dot(Areml_i, SP.dot(V.T, SP.dot(Ki, y)))
    z = y - SP.dot(V, b)
    Kiz = SP.dot(Ki, z)

    # lml
    lml = y.shape[0]*SP.log(2*SP.pi)
    lml += 2*SP.log(SP.diag(cholK)).sum()
    lml += 2*SP.log(SP.diag(cholAreml)).sum()
    lml += SP.dot(z, Kiz)
    lml *= 0.5

    return lml
Developer: PMBio, project: mtSet, lines of code: 35, source file: gp2kronSumLR.py
Example 3: compute_logprod_derivative
def compute_logprod_derivative(Alup, dA, B, dB):
    """ I = logdet(A) + Tr(inv(A)*B)
        dI/dx = Tr(inv(A)*(dA - dA*inv(A)*B + dB)) """
    tmp = lalg.cho_solve(Alup, B, check_finite=False)
    tmp2 = dA + dB - dA.dot(tmp)
    return np.trace(lalg.cho_solve(Alup, tmp2, check_finite=False))
Developer: gwungwun, project: pyParticleEst, lines of code: 7, source file: mlnlg_compute.py
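The docstring's identity follows from d(logdet A) = Tr(A^{-1} dA) and d(A^{-1}) = -A^{-1} dA A^{-1}. A throwaway finite-difference check (all matrices invented), assuming I(x) = logdet(A0 + x*dA) + Tr((A0 + x*dA)^{-1}(B0 + x*dB)):

import numpy as np
from scipy import linalg as lalg

rng = np.random.default_rng(2)
n = 4
S, T = rng.standard_normal((n, n)), rng.standard_normal((n, n))
A0 = S @ S.T + n * np.eye(n)   # SPD so cho_factor applies
dA = T + T.T                   # symmetric perturbation direction
B0, dB = rng.standard_normal((n, n)), rng.standard_normal((n, n))

def I(x):
    A, B = A0 + x * dA, B0 + x * dB
    return np.linalg.slogdet(A)[1] + np.trace(np.linalg.solve(A, B))

analytic = compute_logprod_derivative(lalg.cho_factor(A0), dA, B0, dB)
eps = 1e-6
numeric = (I(eps) - I(-eps)) / (2 * eps)   # central difference
assert np.isclose(analytic, numeric, rtol=1e-4)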
Example 4: loglik_full
def loglik_full(self, l_a, l_rho, Agw, gammagw):
    """
    Given all these parameters, calculate the full likelihood

    @param l_a:     List of Fourier coefficient arrays for all pulsars
    @param l_rho:   List of arrays of log10(PSD) amplitudes for all pulsars
    @param Agw:     log10(GW amplitude)
    @param gammagw: GWB spectral index

    @return: Log-likelihood
    """
    # Transform the GWB parameters to PSD coefficients (pc)
    pc_gw = self.gwPSD(Agw, gammagw)

    rv = 0.0
    for ii, freq in enumerate(self.freqs):
        a_cos = l_a[:, 2*ii]    # Cosine modes for f=freq
        a_sin = l_a[:, 2*ii+1]  # Sine modes for f=freq
        rho = l_rho[:, ii]      # PSD amp for f=freq

        # Covariance matrix is the same for sine and cosine modes
        cov = np.diag(10**rho) + self.hdmat * pc_gw[ii]
        cf = sl.cho_factor(cov)
        logdet = 2*np.sum(np.log(np.diag(cf[0])))

        # Add the log-likelihood for the cosine and the sine modes
        rv += -0.5 * np.dot(a_cos, sl.cho_solve(cf, a_cos)) - \
               0.5 * np.dot(a_sin, sl.cho_solve(cf, a_sin)) - \
               2*self.Npsr*np.log(2*np.pi) - logdet
    return rv
Developer: derek-adair, project: piccard, lines of code: 31, source file: resampler.py
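The `logdet` line uses an identity that recurs throughout these examples: if C = L L^T is a Cholesky factorization, then log det C = 2 * sum_i log L_ii, so the determinant never has to be formed explicitly. A quick check with an invented matrix:

import numpy as np
import scipy.linalg as sl

rng = np.random.default_rng(3)
S = rng.standard_normal((6, 6))
cov = S @ S.T + 6 * np.eye(6)   # symmetric positive definite

cf = sl.cho_factor(cov)
logdet = 2 * np.sum(np.log(np.diag(cf[0])))
assert np.isclose(logdet, np.linalg.slogdet(cov)[1])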
Example 5: get_covariances
def get_covariances(self, hyperparams):
    """
    INPUT:
    hyperparams: dictionary
    OUTPUT: dictionary with the fields
    K:     kernel
    Kinv:  inverse of the kernel
    L:     chol(K)
    alpha: solve(K,y)
    W:     D*Kinv * alpha*alpha^T
    """
    if self._is_cached(hyperparams):
        return self._covar_cache
    K = self.covar.K(hyperparams['covar'])
    if self.likelihood is not None:
        Knoise = self.likelihood.K(hyperparams['lik'], self.n)
        K += Knoise
    L = LA.cholesky(K).T  # lower triangular
    alpha = LA.cho_solve((L, True), self.Y)
    Kinv = LA.cho_solve((L, True), SP.eye(L.shape[0]))
    W = self.t*Kinv - SP.dot(alpha, alpha.T)
    self._covar_cache = {}
    self._covar_cache['K'] = K
    self._covar_cache['Kinv'] = Kinv
    self._covar_cache['L'] = L
    self._covar_cache['alpha'] = alpha
    self._covar_cache['W'] = W
    self._covar_cache['hyperparams'] = copy.deepcopy(hyperparams)
    return self._covar_cache
Developer: PMBio, project: pygp_kronsum, lines of code: 32, source file: gp_base.py
Example 6: finalize
def finalize(self):
    """Finalize the fit, utilizing the already inverted Sigma matrix"""
    # Calculate the prediction quantities
    if len(self.dtp) > 0:
        self.MNt_n = np.dot(self.Mp_n.T, sl.cho_solve(self.Np_cf, self.dtp))
        dtNdt = np.dot(self.dtp, sl.cho_solve(self.Np_cf, self.dtp))
    else:
        self.MNt_n = np.zeros(self.Mp_n.shape[1])
        dtNdt = 0.0
    self.dpars_n = np.dot(self.Sigma_n, self.MNt_n + self.phipar_n)
    # TODO: should use dpars, instead of MNt below here???
    self.rp = np.dot(self.Mtot_n, np.dot(self.Sigma_n, self.MNt_n))  # Should be approx~0.0
    self.rr = np.dot(self.Mtot_n, np.dot(self.Sigma_n, self.Mtot_n.T))

    # Calculate the log-likelihood
    logdetN2 = np.sum(np.log(np.diag(self.Np_cf[0])))
    logdetphi2 = 0.5*np.sum(np.log(self.Phivec_n))
    chi2dt = 0.5*dtNdt
    chi2phi = 0.5*np.sum(self.prpars_delta_n**2/self.Phivec_n)
    chi2phi1 = 0.5*np.dot(self.dpars_n, np.dot(self.Sigma_inv_n, self.dpars_n))
    chi2_active = 0.5*np.dot(self.dpars_n, np.dot(self.Sigma_inv_n, self.dpars_n))

    # NOTE: chi2_active is zero _if_ we move to ML solution. We are dpars
    #       away from there. That's why we subtract it from loglik.
    #       Note also that, now, chi2phi1 and chi2_active are the same in
    #       this rescaling
    self.loglik = -logdetN2 - logdetphi2 - chi2dt - chi2phi + chi2phi1 - chi2_active
    self.loglik_ml = -logdetN2 - logdetphi2 - chi2dt - chi2phi + chi2phi1
Developer: vhaasteren, project: pysolvepulsar, lines of code: 29, source file: linearfitter.py
Example 7: _update_cache
def _update_cache(self):
    """
    INPUT:
    hyperparams: dictionary
    OUTPUT: dictionary with the fields
    K:     kernel
    Kinv:  inverse of the kernel
    L:     chol(K)
    alpha: solve(K,y)
    W:     D*Kinv * alpha*alpha^T
    """
    cov_params_have_changed = self.covar.params_have_changed

    if cov_params_have_changed or self.Y_has_changed:
        K = self.covar.K()
        L = LA.cholesky(K).T  # lower triangular
        Kinv = LA.cho_solve((L, True), SP.eye(L.shape[0]))
        alpha = LA.cho_solve((L, True), self.Y)
        W = self.t*Kinv - SP.dot(alpha, alpha.T)
        self._covar_cache = {}
        self._covar_cache['K'] = K
        self._covar_cache['Kinv'] = Kinv
        self._covar_cache['L'] = L
        self._covar_cache['alpha'] = alpha
        self._covar_cache['W'] = W
    return self._covar_cache
Developer: jeffhsu3, project: limix, lines of code: 27, source file: gp_base.py
Example 8: predict
def predict(self, y, t):
    """
    Compute the conditional predictive distribution of the model.

    :param y: ``(nsamples,)``
        The observations to condition the model on.

    :param t: ``(ntest,)`` or ``(ntest, ndim)``
        The coordinates where the predictive distribution should be
        computed.

    Returns a tuple ``(mu, cov)`` where

    * **mu** ``(ntest,)`` is the mean of the predictive distribution, and
    * **cov** ``(ntest, ntest)`` is the predictive covariance.
    """
    self.recompute()
    r = self._check_dimensions(y)[self.inds] - self.mean(self._x)
    xs, i = self.parse_samples(t, False)
    alpha = cho_solve(self._factor, r)

    # Compute the predictive mean.
    Kxs = self.kernel(self._x[None, :], xs[:, None])
    mu = np.dot(Kxs, alpha) + self.mean(xs)

    # Compute the predictive covariance.
    cov = self.kernel(xs[:, None], xs[None, :])
    cov -= np.dot(Kxs, cho_solve(self._factor, Kxs.T))

    return mu, cov
Developer: RobertOrmandi, project: george, lines of code: 31, source file: basic.py
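For reference, the two cho_solve calls implement the standard Gaussian-process prediction equations, with K the training covariance whose Cholesky factorization is held in self._factor:

\mu_* = m(x_*) + K(x_*, x)\,K^{-1}\big(y - m(x)\big), \qquad
\Sigma_* = K(x_*, x_*) - K(x_*, x)\,K^{-1}\,K(x, x_*)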
Example 9: _calculate_log_likelihood
def _calculate_log_likelihood(self):
    #if self.m == None:
    #    Give error message
    R = zeros((self.n, self.n))
    X, Y = array(self.X), array(self.Y)
    thetas = 10.**self.thetas
    for i in range(self.n):
        for j in arange(i+1, self.n):
            R[i, j] = (1-self.nugget)*e**(-sum(thetas*(X[i]-X[j])**2.))  # weighted distance formula
    R = R + R.T + eye(self.n)
    self.R = R
    one = ones(self.n)
    try:
        self.R_fact = cho_factor(R)
        rhs = vstack([Y, one]).T
        R_fact = (self.R_fact[0].T, not self.R_fact[1])
        cho = cho_solve(R_fact, rhs).T

        self.mu = dot(one, cho[0])/dot(one, cho[1])
        self.sig2 = dot(Y-dot(one, self.mu), cho_solve(self.R_fact, (Y-dot(one, self.mu))))/self.n
        #self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))-sum(thetas)
        self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))
    except (linalg.LinAlgError, ValueError):
        #------LSTSQ---------
        self.R_fact = None  # reset this to None, so we know not to use Cholesky
        #self.R = self.R+diag([10e-6]*self.n)  # improve conditioning [Booker et al., 1999]
        rhs = vstack([Y, one]).T
        lsq = lstsq(self.R.T, rhs)[0].T
        self.mu = dot(one, lsq[0])/dot(one, lsq[1])
        self.sig2 = dot(Y-dot(one, self.mu), lstsq(self.R, Y-dot(one, self.mu))[0])/self.n
        self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))
Developer: Kenneth-T-Moore, project: OpenMDAO-Framework, lines of code: 31, source file: kriging_surrogate.py
Example 10: grad_nlogprob
def grad_nlogprob(hypers):
    amp2 = np.exp(hypers[0])
    noise = np.exp(hypers[1])
    ls = np.exp(hypers[2:])

    chol, corr, grad_corr = memoize(amp2, noise, ls)
    solve = spla.cho_solve((chol, True), diffs)
    inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))

    jacobian = np.outer(solve, solve) - inv_cov

    grad = np.zeros(self.D + 2)

    # Log amplitude gradient.
    grad[0] = 0.5 * np.trace(np.dot(jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2

    # Log noise gradient.
    grad[1] = 0.5 * np.trace(np.dot(jacobian, np.eye(chol.shape[0]))) * noise

    # Log length scale gradients.
    for dd in xrange(self.D):
        grad[dd+2] = 1 * np.trace(np.dot(jacobian, -amp2*grad_corr[:, :, dd]*comp[:, dd][:, np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])

    # Roll in the prior variance.
    #grad -= 2*hypers/self.hyper_prior

    return -grad
Developer: ninjin, project: spearmint-lite, lines of code: 27, source file: gp.py
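The `jacobian` variable encodes the standard gradient identity for a Gaussian marginal likelihood: with alpha = K^{-1}r,

\frac{\partial \log p(\mathbf{r}\mid\theta)}{\partial\theta}
  = \frac{1}{2}\,\operatorname{Tr}\!\left[\big(\alpha\alpha^{T} - K^{-1}\big)
    \frac{\partial K}{\partial\theta}\right],
  \qquad \alpha = K^{-1}\mathbf{r},

which the function evaluates per hyperparameter (chain-ruled through the log transform) and finally negates, since it targets the negative log probability.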
Example 11: elbo
def elbo(params, mask, *args):
    """ELBO with full posterior covariance matrix"""
    t, mu, post_cov = args
    K, dK = kernel(t, params)
    dK *= mask[np.newaxis, np.newaxis, :]

    try:
        L = cholesky(K, lower=True)
    except LinAlgError:
        return -np.inf, np.zeros_like(params)

    Kinv = cho_solve((L, True), np.eye(K.shape[0]))  # K inverse

    if mu.ndim == 1:
        mu = mu[:, np.newaxis]

    alpha = cho_solve((L, True), mu)
    ll_dims = -0.5 * np.einsum("ik,ik->k", mu, alpha)
    tmp = np.einsum("ik,jk->ijk", alpha, alpha)
    tmp -= Kinv[:, :, np.newaxis]

    for i in range(post_cov.shape[-1]):
        KinvSigma = cho_solve((L, True), post_cov[:, :, i])
        ll_dims[i] -= 0.5 * np.trace(KinvSigma)
        tmp[:, :, i] += KinvSigma @ Kinv

    ll_dims -= np.log(np.diag(L)).sum()
    ll = ll_dims.sum(-1)

    dll_dims = 0.5 * np.einsum("ijl,ijk->kl", tmp, dK)
    dll = dll_dims.sum(-1)

    return ll, dll
Developer: catniplab, project: vLGP, lines of code: 32, source file: gp.py
Example 12: rakeDistortionlessFilters
def rakeDistortionlessFilters(self, source, interferer, R_n, delay=0.03, epsilon=5e-3):
    '''
    Compute time-domain filters of a beamformer minimizing noise and interference
    while forcing a distortionless response towards the source.
    '''
    H = buildRIRMatrix(self.R, (source, interferer), self.Lg, self.Fs, epsilon=epsilon, unit_damping=True)
    L = H.shape[1]/2

    # We first assume the samples are uncorrelated
    K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n

    # constraint
    kappa = int(delay*self.Fs)
    A = H[:, :L]
    b = np.zeros((L, 1))
    b[kappa, 0] = 1

    # filter computation
    C = la.cho_factor(K_nq, overwrite_a=True, check_finite=False)
    B = la.cho_solve(C, A)
    D = np.dot(A.T, B)
    C = la.cho_factor(D, overwrite_a=True, check_finite=False)
    x = la.cho_solve(C, b)
    g_val = np.dot(B, x)

    # reshape and store
    self.filters = g_val.reshape((self.M, self.Lg))

    # compute and return SNR
    A = np.dot(g_val.T, H[:, :L])
    num = np.dot(A, A.T)
    denom = np.dot(np.dot(g_val.T, K_nq), g_val)

    return num/denom
Developer: LCAV, project: TimeDomainAcousticRakeReceiver, lines of code: 35, source file: beamforming.py
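The two cho_factor/cho_solve stages compute the classic linearly-constrained minimum-variance solution: minimizing g^T K_nq g subject to A^T g = b gives

\mathbf{g} = K_{nq}^{-1} A \left(A^{T} K_{nq}^{-1} A\right)^{-1} \mathbf{b},

where the code's B is K_nq^{-1}A and D is A^T K_nq^{-1} A. Factoring each matrix once makes both solves cheap relative to explicit inversion.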
Example 13: _LMLgrad_covar_debug
def _LMLgrad_covar_debug(self, covar):
    assert self.N*self.P < 2000, 'gp2kronSum:: N*P>=2000'

    y = SP.reshape(self.Y, (self.N*self.P), order='F')

    K = SP.kron(self.Cg.K(), self.XX)
    K += SP.kron(self.Cn.K() + self.offset*SP.eye(self.P), SP.eye(self.N))

    cholK = LA.cholesky(K).T
    Ki = LA.cho_solve((cholK, True), SP.eye(y.shape[0]))
    Kiy = LA.cho_solve((cholK, True), y)

    if covar == 'Cr':
        n_params = self.Cr.getNumberParams()
    elif covar == 'Cg':
        n_params = self.Cg.getNumberParams()
    elif covar == 'Cn':
        n_params = self.Cn.getNumberParams()

    RV = SP.zeros(n_params)

    for i in range(n_params):
        # 0. calc grad_i
        if covar == 'Cg':
            C = self.Cg.Kgrad_param(i)
            Kgrad = SP.kron(C, self.XX)
        elif covar == 'Cn':
            C = self.Cn.Kgrad_param(i)
            Kgrad = SP.kron(C, SP.eye(self.N))

        # 1. der of log det
        RV[i] = 0.5*(Ki*Kgrad).sum()

        # 2. der of quad form
        RV[i] -= 0.5*(Kiy*SP.dot(Kgrad, Kiy)).sum()

    return RV
Developer: PMBio, project: mtSet, lines of code: 35, source file: gp2kronSum.py
Example 14: predict
def predict(self, y, t):
    """
    Compute the conditional predictive distribution of the model.

    :param y: ``(nsamples, )``
        The observations to condition the model on.

    :param t: ``(ntest, )``
        The coordinates where the predictive distribution should be
        computed.

    :returns mu: ``(ntest, )``
        The mean of the predictive distribution.

    :returns cov: ``(ntest, ntest)``
        The predictive covariance.
    """
    r = self._check_dimensions(y)
    xs, i = self._parse_samples(t, False)
    alpha = cho_solve(self._factor, r)

    # Compute the predictive mean.
    Kxs = self._kernel(self._x[None, :], xs[:, None])
    mu = np.dot(Kxs, alpha)

    # Compute the predictive covariance.
    cov = self._kernel(xs[:, None], xs[None, :])
    cov -= np.dot(Kxs, cho_solve(self._factor, Kxs.T))

    return mu, cov
Developer: exoplaneteer, project: george, lines of code: 31, source file: basic.py
Example 15: find_likelihood_der
def find_likelihood_der(self, X, y):
    """
    Find the negative log likelihood and its partial derivatives.

    Parameters
    ----------

    Returns
    -------
    """
    n = len(X)
    K = self.cf.eval(X)
    #if len(self.krnds) != K.shape[0]:
    #    print "Created new self.krnds!"
    #    self.krnds = np.random.randn(K.shape[0])*10**-6
    #K = K + np.eye(K.shape[0])*self.krnds
    L = np.linalg.cholesky(K)  # Problems using this on the cluster - bad scaling! Running time becomes really bad with large N. Solution: Update ATLAS
    #L = la.cholesky(K)
    #a = np.linalg.solve(L.T, np.linalg.solve(L, y))
    a = la.cho_solve((L, True), y)
    nll = 0.5*np.dot(y.T, a) + np.sum(np.log(np.diag(L))) + 0.5*n*np.log(2*np.pi)
    ders = np.zeros(len(self.cf.get_params()))
    #W = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(n))) - a*a.T
    W = la.cho_solve((L, True), np.eye(n)) - a*a.T
    for i in range(len(self.cf.get_params())):
        ders[i] = np.sum(W*self.cf.derivative(X, i))/2
    return nll[0, 0], ders
Developer: GreenSteam, project: pypr, lines of code: 33, source file: GaussianProcess.py
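The `nll` line is the textbook negative log marginal likelihood of a zero-mean GP, written in terms of the Cholesky factor K = L L^T:

-\log p(\mathbf{y}\mid X)
  = \frac{1}{2}\,\mathbf{y}^{T} K^{-1}\mathbf{y}
  + \sum_i \log L_{ii}
  + \frac{n}{2}\log 2\pi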
Example 16: cal_varcov
def cal_varcov(self, θ2_vec):
    """calculate variance covariance matrix"""
    θ2, ix_θ2_T, Z, LinvW, X1 = self.θ2, self.ix_θ2_T, self.Z, self.LinvW, self.X1
    θ2.T[ix_θ2_T] = θ2_vec

    # update δ
    δ = self.cal_δ(θ2)
    jacob = self.cal_jacobian(θ2, δ)
    θ1, ξ = self.cal_θ1_and_ξ(δ)

    Zres = Z * ξ.reshape(-1, 1)
    Ω = Zres.T @ Zres               # covariance of the momconds
    G = (np.c_[X1, jacob].T @ Z).T  # gradient of the momconds

    WG = cho_solve(LinvW, G)
    WΩ = cho_solve(LinvW, Ω)

    tmp = solve(G.T @ WG, G.T @ WΩ @ WG).T  # G'WΩWG(G'WG)^(-1) part
    varcov = solve((G.T @ WG), tmp)

    return varcov
Developer: joonro, project: BLP-Python, lines of code: 26, source file: pyBLP.py
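Assuming LinvW holds a Cholesky factorization of W^{-1}, so that the cho_solve calls apply the weighting matrix W, the function assembles the standard GMM sandwich variance estimator in the notation of the code's comments:

V = \left(G^{T}WG\right)^{-1} G^{T}W\Omega WG\,\left(G^{T}WG\right)^{-1}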
Example 17: predict
def predict(self, pv, flux=None, inputs=None, inputs_pred=None, mean_only=True, splits=None):
    flux = flux if flux is not None else self.data.masked_flux
    iptr = inputs if inputs is not None else self.data.masked_inputs
    ippr = inputs_pred if inputs_pred is not None else iptr

    K0 = self.compute_cmat(pv, iptr, iptr, add_wn=False, splits=splits)
    K = K0 + self._pv[-1]**2 * identity(K0.shape[0])

    if inputs_pred is None:
        Ks = K0.copy()
        Kss = K.copy()
    else:
        Ks = self.compute_cmat(pv, ippr, ippr, add_wn=False, splits=splits)
        Kss = self.compute_cmat(pv, ippr, ippr, add_wn=True, splits=splits)

    L = sla.cho_factor(K)
    b = sla.cho_solve(L, flux)
    mu = dot(Ks, b)

    if mean_only:
        return mu
    else:
        b = sla.cho_solve(L, Ks.T)
        cov = Kss - dot(Ks, b)
        err = np.sqrt(diag(cov))
        return mu, err
Developer: Cadair, project: k2sc, lines of code: 25, source file: segdetrender.py
Example 18: predict_variance
def predict_variance(self, X1, X2):
    if self.m == None:
        print("ERROR: Model has to be trained first.")
        return None
    LX1 = spla.cho_solve((self.m.posterior.L, True), self.kernel.getCovMatrix(self.X, X1, "cross"))
    LX2 = spla.cho_solve((self.m.posterior.L, True), self.kernel.getCovMatrix(self.X, X2, "cross"))
    var = self.kernel.getCovMatrix(X1, X2, "cross") - np.dot(LX1.T, LX2)
    return var
Developer: sfalkner, project: RoBO, lines of code: 8, source file: pygp_model.py
Example 19: E_step
def E_step(self):
    M = np.dot(self.W.T, self.W) + np.eye(self.q)*self.sigma2
    #M_inv = np.linalg.inv(M)
    #self.m_Z = np.dot(M_inv, np.dot(self.W.T, self.X2.T)).T
    #self.S_z = M_inv*self.sigma2
    M_chol = linalg.cholesky(M)
    M_inv = linalg.cho_solve((M_chol, 1), np.eye(self.q))
    self.m_Z = linalg.cho_solve((M_chol, 1), np.dot(self.W.T, self.X2.T)).T
    self.S_z = M_inv*self.sigma2
Developer: codemarsyu, project: pythonGPLVM, lines of code: 9, source file: PCA_EM.py
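These are the E-step moments of probabilistic PCA (Tipping & Bishop, 1999): with M = W^T W + sigma^2 I,

\mathbb{E}[\mathbf{z}_n] = M^{-1} W^{T} \mathbf{x}_n, \qquad
\operatorname{Cov}[\mathbf{z}_n] = \sigma^{2} M^{-1},

which the code evaluates for all data points at once via two cho_solve calls against the same q x q factorization.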
Example 20: remove_affine
def remove_affine(p, q, q_factor=None, skip_factorization=False):
    """Removes an (unknown) affine transform between two matrices.

    Given two arrays of the same size, `p` and `q`, finds a matrix `A` and
    column vector `t` such that

        `p = A * q + t`

    in the least-squares sense, and then computes `qnew = A * q + t`.
    (Notation: `matrix + vector` implies the vector is added to each column
    of the matrix.)

    NB: `p` and the returned `qnew` will be equal if and only if `p` is
    generated from `q` via an affine transform (no noise).

    Returns `(qnew, fwd, inv, Ahat, that)`, where `fwd` and `inv` are
    functions that apply and invert the estimated transform, and `Ahat` and
    `that` are the estimated values of `A` and `t`.

    `q_factor` is a matrix factorization that can greatly speed up subsequent
    calls to remove_affine *with the same `q`*. If your `q` stays the same
    for multiple calls, cache `q_factor` and pass it in as a keyword
    argument; `q_factor` won't change from call to call. However, if your `q`
    changes from call to call, ignore `q_factor` and pass in
    `skip_factorization=True` to avoid even calculating it.

    NB2: the default `q_factor=None` will trigger computation of the
    factorization unless `skip_factorization=True`. Non-`None` `q_factor`
    will be trusted: no checks will be performed to make sure the given
    `q_factor` is indeed generated by the `q` you pass in. (Example: for
    `q.shape` of (2, 22), the speedup from using `q_factor` is 1.4x over the
    case with `skip_factorization=False`, and 1.3x over the case with
    `skip_factorization=True`, on a 2009 Mac Book Pro.)

    Implements the algorithm described in H. Spath, "Fitting affine and
    orthogonal transformations between two sets of points" in *Mathematical
    Communications*, vol. 9 (2004), pp. 27--34. http://hrcak.srce.hr/file/1425
    """
    qaug = np.vstack([q, np.ones_like(q[0, :])])
    if q_factor is None:
        Q = np.dot(qaug, qaug.T)
        if skip_factorization:
            sol = la.lstsq(Q, np.dot(qaug, p.T))[0]
            q_factor = None
        else:
            q_factor = scila.cho_factor(Q)
            sol = scila.cho_solve(q_factor, np.dot(qaug, p.T))
    else:
        sol = scila.cho_solve(q_factor, np.dot(qaug, p.T))
    # sol.shape is (n+1, n), for n=p.shape[0]
    Ahat = sol[:-1, :].T  # top square matrix of sol, transposed
    that = sol[-1:, :].T  # bottom row vector of sol, transposed
    qnew = np.dot(Ahat, q) + that
    return (qnew, lambda x: Ahat @ x + that,
            lambda t: np.linalg.lstsq(Ahat, t - that, rcond=None)[0], Ahat, that)
Developer: fasiha, project: steppe-map, lines of code: 56, source file: deproject.py
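A hedged usage sketch (the 2-D point sets are invented; it assumes `remove_affine` and its `la`/`scila` aliases for scipy.linalg are in scope, as in the source module):

import numpy as np

rng = np.random.default_rng(4)
q = rng.standard_normal((2, 22))               # 2 x N point set
A_true = np.array([[1.0, 0.3], [-0.2, 0.9]])
t_true = np.array([[2.0], [-1.0]])
p = A_true @ q + t_true                        # noiseless affine image of q

qnew, fwd, inv, Ahat, that = remove_affine(p, q)
assert np.allclose(qnew, p)                    # exact recovery (no noise)
assert np.allclose(Ahat, A_true) and np.allclose(that, t_true)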
Note: the scipy.linalg.cho_solve function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.