This article collects typical usage examples of the Python function scipy.linalg.cho_factor. If you have been wondering what cho_factor does, how to call it, or what real-world uses look like, the curated examples below should help.
Twenty code examples of cho_factor are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
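Before the examples, here is a minimal, self-contained sketch of the basic cho_factor / cho_solve pattern that recurs throughout them (the matrix A and vector b below are made up purely for illustration): factor a symmetric positive-definite matrix once, reuse the factor to solve linear systems, and read the log-determinant off the diagonal of the Cholesky factor.

import numpy as np
from scipy.linalg import cho_factor, cho_solve

# Build a small symmetric positive-definite matrix (illustrative values only).
rng = np.random.default_rng(0)
B = rng.standard_normal((5, 5))
A = B @ B.T + 5 * np.eye(5)
b = rng.standard_normal(5)

# Factor once; c is the Cholesky factor, lower tells cho_solve which triangle is stored.
c, lower = cho_factor(A)

# Solve A x = b using the factorization.
x = cho_solve((c, lower), b)

# log(det(A)) = 2 * sum(log(diag(chol))), a pattern used repeatedly in the examples below.
logdet_A = 2 * np.sum(np.log(np.diag(c)))

print(np.allclose(A @ x, b), logdet_A)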
Example 1: lnprob_cov
def lnprob_cov(C):
    # Get first term of the log-likelihood expression (y * (1/C) * y.T)
    # Do the computation using a Cholesky decomposition
    try:
        U, luflag = cho_factor(C)
    except LinAlgError:
        # Matrix is not positive semi-definite, so replace it with the
        # positive semi-definite matrix that is nearest in the Frobenius norm
        E, EV = eigh(C)
        E[E < 0] = 1e-12
        U, luflag = cho_factor(EV.dot(np.diag(E)).dot(EV.T))
    finally:
        x2 = cho_solve((U, luflag), dxy)
        L1 = dxy.dot(x2)
    # Get second term of the log-likelihood expression (log det C)
    sign, L2 = slogdet(C)
    # Why am I always confused by this?
    thing_to_be_minimised = (L1 + L2)
    return thing_to_be_minimised
Developer: evanbiederstedt, Project: pyBAST, Lines of code: 29, Source file: distortion.py
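The snippet above depends on module-level names from distortion.py (notably the residual vector dxy). Purely for illustration, here is a self-contained sketch of the same pattern, with made-up inputs, showing the eigenvalue-clipping fallback to the nearest positive semi-definite matrix:

import numpy as np
from numpy.linalg import slogdet, eigh, LinAlgError
from scipy.linalg import cho_factor, cho_solve

def gaussian_neg_loglike(C, dxy):
    """Return dxy^T C^{-1} dxy + log det C, the quantity minimised in Example 1."""
    try:
        U, luflag = cho_factor(C)
    except LinAlgError:
        # Clip negative eigenvalues: nearest PSD matrix in the Frobenius norm.
        E, EV = eigh(C)
        E[E < 0] = 1e-12
        U, luflag = cho_factor(EV.dot(np.diag(E)).dot(EV.T))
    quad = dxy.dot(cho_solve((U, luflag), dxy))
    _, logdet = slogdet(C)
    return quad + logdet

# Made-up inputs, only to exercise the function.
rng = np.random.default_rng(1)
dxy = rng.standard_normal(4)
C = np.cov(rng.standard_normal((4, 50)))
print(gaussian_neg_loglike(C, dxy))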
Example 2: jitchol
def jitchol(A, maxtries=5):
    """
    Arguments
    ---------
    A : an almost positive-definite square matrix

    Returns
    -------
    cho_factor(A)

    Notes
    -----
    Adds jitter to the diagonal of A, to enforce positive-definiteness
    """
    try:
        return linalg.cho_factor(A)
    except linalg.LinAlgError:
        diagA = np.diag(A)
        if np.any(diagA < 0.):
            raise linalg.LinAlgError("not pd: negative diagonal elements")
        jitter = diagA.mean() * 1e-6
        for i in range(1, maxtries + 1):
            try:
                return linalg.cho_factor(A + np.eye(A.shape[0]) * jitter)
            except linalg.LinAlgError:
                jitter *= 10
                print('Warning: adding jitter of ' + str(jitter))
        raise linalg.LinAlgError("not positive definite, even with jitter.")
Developer: christoforosc, Project: ndlutil, Lines of code: 29, Source file: utilities.py
Example 3: model_and_cov
def model_and_cov(self, fit_params):
    model = (self.pmodel * fit_params['A'] * self.b ** fit_params['n'])[self.windowrange] \
            + self.fgs(fit_params, self.lmax)[self.windowrange]
    # previously: (self.pmodel*self.A*self.b**self.n)[self.windowrange] + self.fgs([self.Asz,self.Aps,self.Acib],self.freqs)[self.windowrange]
    if self.freqs[0] != 143:
        self.cho_cov = cho_factor(self.patch_sigma + self.planck_sigma
                                  + dot(self.windows, dot(self.beam_corr * outer(model, model), self.windows.T)))
    else:
        self.cho_cov = cho_factor(self.patch_sigma + self.planck_sigma)
    return (dot(self.windows, model), self.cho_cov)
Developer: kmaylor, Project: CMB_minimizer, Lines of code: 7, Source file: Power_law_param_estimator.py
Example 4: rakeDistortionlessFilters
def rakeDistortionlessFilters(self, source, interferer, R_n, delay=0.03, epsilon=5e-3):
    '''
    Compute time-domain filters of a beamformer minimizing noise and interference
    while forcing a distortionless response towards the source.
    '''
    H = buildRIRMatrix(self.R, (source, interferer), self.Lg, self.Fs, epsilon=epsilon, unit_damping=True)
    L = H.shape[1] // 2

    # We first assume the samples are uncorrelated
    K_nq = np.dot(H[:, L:], H[:, L:].T) + R_n

    # constraint
    kappa = int(delay * self.Fs)
    A = H[:, :L]
    b = np.zeros((L, 1))
    b[kappa, 0] = 1

    # filter computation
    C = la.cho_factor(K_nq, overwrite_a=True, check_finite=False)
    B = la.cho_solve(C, A)
    D = np.dot(A.T, B)
    C = la.cho_factor(D, overwrite_a=True, check_finite=False)
    x = la.cho_solve(C, b)
    g_val = np.dot(B, x)

    # reshape and store
    self.filters = g_val.reshape((self.M, self.Lg))

    # compute and return SNR
    A = np.dot(g_val.T, H[:, :L])
    num = np.dot(A, A.T)
    denom = np.dot(np.dot(g_val.T, K_nq), g_val)
    return num / denom
Developer: LCAV, Project: TimeDomainAcousticRakeReceiver, Lines of code: 35, Source file: beamforming.py
Example 5: __init__
def __init__(self, data):
    self._data = np.atleast_2d(data)
    self._mean = np.mean(data, axis=0)
    self._cov = None

    if self.data.shape[0] > 1:
        try:
            self._cov = np.cov(data.T)
            # Try factoring now to see if regularization is needed
            la.cho_factor(self._cov)
        except la.LinAlgError:
            self._cov = oas_cov(data)

    self._set_bandwidth()

    # store transformation variables for drawing random values
    alphas = np.std(data, axis=0)
    ms = 1. / alphas
    m_i, m_j = np.meshgrid(ms, ms)
    ms = m_i * m_j

    self._draw_cov = ms * self._kernel_cov
    self._scale_fac = alphas
Developer: bfarr, Project: kombine, Lines of code: 25, Source file: clustered_kde.py
Example 6: likelihood_prior
def likelihood_prior(self, mu, Sigma, k, R_S_mu=None, log_det_Q=None, R_S=None, switchprior=False):
    """
    Computes the prior, that is
        \pi( \mu | \theta[k], \Sigma[k]) \pi(\Sigma | Q[k], \nu[k]) =
        N(\mu; \theta[k], \Sigma[k]) IW(\Sigma; Q[k], \nu[k])

    If switchprior = True, special values of nu and Sigma_mu
    are used if the parameters nu_sw and Sigma_mu_sw are set,
    respectively. This enables use of "relaxed" priors
    facilitating label switching. NB! This makes the kernel
    non-symmetric, hence it cannot be used in a stationary state.
    """
    if switchprior:
        try:
            nu = self.nu_sw
        except AttributeError:
            nu = self.prior[k]['sigma']['nu']
        try:
            Sigma_mu = self.Sigma_mu_sw
        except AttributeError:
            Sigma_mu = self.prior[k]['mu']['Sigma']
        Q = self.prior[k]['sigma']['Q'] * nu / self.prior[k]['sigma']['nu']
    else:
        nu = self.prior[k]['sigma']['nu']
        Sigma_mu = self.prior[k]['mu']['Sigma']
        Q = self.prior[k]['sigma']['Q']

    if np.isnan(mu[0]):
        return 0, None, None, None

    if R_S_mu is None:
        R_S_mu = sla.cho_factor(Sigma_mu, check_finite=False)
    log_det_Sigma_mu = 2 * np.sum(np.log(np.diag(R_S_mu[0])))

    if log_det_Q is None:
        R_Q = sla.cho_factor(Q, check_finite=False)
        log_det_Q = 2 * np.sum(np.log(np.diag(R_Q[0])))

    if R_S is None:
        R_S = sla.cho_factor(Sigma, check_finite=False)
    log_det_Sigma = 2 * np.sum(np.log(np.diag(R_S[0])))

    mu_theta = mu - self.prior[k]['mu']['theta'].reshape(self.d)

    # N(\mu; \theta[k], \Sigma[k])
    lik = -np.dot(mu_theta.T, sla.cho_solve(R_S_mu, mu_theta, check_finite=False)) / 2
    lik = lik - 0.5 * (nu + self.d + 1.) * log_det_Sigma
    lik = lik + (nu * 0.5) * log_det_Q
    lik = lik - 0.5 * log_det_Sigma_mu
    lik = lik - self.ln_gamma_d(0.5 * nu) - 0.5 * np.log(2) * (nu * self.d)
    lik = lik - 0.5 * np.sum(np.diag(sla.cho_solve(R_S, Q)))

    return lik, R_S_mu, log_det_Q, R_S
Developer: JonasWallin, Project: BayesFlow, Lines of code: 55, Source file: GMM.py
Example 7: mark2loglikelihood
def mark2loglikelihood(psr, Aw, Ar, Si):
    """
    Log-likelihood for our pulsar

    This likelihood does marginalize over the timing model. Calculate the
    covariance matrix in the time domain with:

    ll = -0.5 * res^{T} (C^{-1} - C^{-1} M (M^{T} C^{-1} M)^{-1} M^{T} C^{-1}) res -
         0.5 * log(det(C)) - 0.5 * log(det(M^{T} C^{-1} M))

    In relation to 'mark1loglikelihood', this likelihood has but a simple addition:

    res' = res - M xi

    where M is an (n x m) matrix, with m < n, and xi is a vector of length m. The xi
    are analytically marginalised over, yielding the above equation (up to constants).

    :param psr:
        pulsar object, containing the data and stuff

    :param Aw:
        White noise amplitude, model parameter

    :param Ar:
        Red noise amplitude, model parameter

    :param Si:
        Spectral index of red noise, model parameter
    """
    Mmat = psr.Mmat
    Cov = Aw ** 2 * np.eye(len(psr.toas)) + PL_covmat(psr.toas, Ar, alpha=0.5 * (3 - Si), fL=1.0 / (year * 20))

    cfC = sl.cho_factor(Cov)
    Cinv = sl.cho_solve(cfC, np.eye(len(psr.toas)))
    ldetC = 2 * np.sum(np.log(np.diag(cfC[0])))

    MCM = np.dot(Mmat.T, np.dot(Cinv, Mmat))
    cfM = sl.cho_factor(MCM)
    ldetM = 2 * np.sum(np.log(np.diag(cfM[0])))

    wr = np.dot(Cinv, psr.residuals)
    rCr = np.dot(psr.residuals, wr)
    MCr = np.dot(Mmat.T, wr)

    return (
        -0.5 * rCr
        + 0.5 * np.dot(MCr, sl.cho_solve(cfM, MCr))
        - 0.5 * ldetC
        - 0.5 * ldetM
        - 0.5 * len(psr.residuals) * np.log(2 * np.pi)
    )
Developer: nanograv, Project: cit-busyweek, Lines of code: 50, Source file: day4funcs.py
Example 8: spla_chol_invert
def spla_chol_invert(K, eye):
    '''
    Invert a positive-definite matrix using its Cholesky decomposition
    '''
    Ltup = spla.cho_factor(K, lower=True)
    K_inv = spla.cho_solve(Ltup, eye, check_finite=False)
    return K_inv
Developer: zpace, Project: stellarmass_pca, Lines of code: 7, Source file: linalg.py
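For context, a brief usage sketch of the function above (the matrix K is made up for this sketch; spla is assumed to refer to scipy.linalg, as it does in the source module):

import numpy as np
import scipy.linalg as spla

# Illustrative SPD matrix: K = B B^T + eps * I
rng = np.random.default_rng(2)
B = rng.standard_normal((6, 6))
K = B @ B.T + 1e-3 * np.eye(6)

K_inv = spla_chol_invert(K, np.eye(6))
print(np.allclose(K @ K_inv, np.eye(6)))  # True: the product recovers the identity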
Example 9: logLikelihood
def logLikelihood(self, p):
    "Function to calculate the log likelihood"

    # calculate the residuals
    r = self.t - self.mf(p[self.n_hp:], self.mf_args)
    # ensure r is an (n x 1) column vector
    r = np.matrix(np.array(r).flatten()).T

    # recalculate covariance matrix, Cholesky factor and logdet if the hyperparameters change
    # print "pars:", self.pars, type(self.pars)
    # print "p:", p, type(p)
    # print p[:self.n_hp] != self.pars[:self.n_hp]
    # print np.all(p[:self.n_hp] != self.pars[:self.n_hp])
    if self.pars is None or np.all(p[:self.n_hp] != self.pars[:self.n_hp]):
        # print "no :("
        self.K = GPC.CovarianceMatrix(p[:self.n_hp], self.kf_args, KernelFunction=self.kf)
        self.ChoFactor = LA.cho_factor(self.K)  # ,overwrite_a=1)
        self.logdetK = 2 * np.log(np.diag(self.ChoFactor[0])).sum()
    # else: print "yeah!!"

    # store the new parameters
    self.pars = np.copy(p)

    # calculate the log likelihood
    logP = -0.5 * r.T * np.mat(LA.cho_solve(self.ChoFactor, r)) - 0.5 * self.logdetK - (r.size / 2.) * np.log(2 * np.pi)

    return float(logP)
Developer: OxES, Project: Infer, Lines of code: 28, Source file: InferGP.old.py
Example 10: _solve_admm
def _solve_admm(Y, q, alpha=10, mu=10, max_iter=10000):
    n = Y.shape[0]
    alpha_q = alpha * q
    # solve (YYt + mu*I + mu) Z = (mu*C - lambda + gamma + mu)
    A, lower = cho_factor(Y.dot(Y.T) + mu * (np.eye(n) + 1), overwrite_a=True)
    C = np.zeros(n)
    Z_old = 0  # shape (n,)
    lmbda = np.zeros(n)
    gamma = 0
    # ADMM iteration
    for i in range(max_iter):
        # call the guts of cho_solve directly for speed
        # (potrs is the LAPACK Cholesky-solve routine, imported elsewhere in the module)
        Z, _ = potrs(A, gamma + mu + mu * C - lmbda, lower=lower, overwrite_b=True)
        tmp = mu * Z + lmbda
        C[:] = np.abs(tmp)
        C -= alpha_q
        np.maximum(C, 0, out=C)
        C *= np.sign(tmp)
        C /= mu
        d_ZC = Z - C
        d_1Z = 1 - Z.sum()
        lmbda += mu * d_ZC
        gamma += mu * d_1Z
        if ((abs(d_1Z) / n < 1e-6)
                and (np.abs(d_ZC).mean() < 1e-6)
                and (np.abs(Z - Z_old).mean() < 1e-5)):
            break
        Z_old = Z
    else:
        warnings.warn('ADMM failed to converge after %d iterations.' % max_iter)
    return C
Developer: all-umass, Project: graphs, Lines of code: 34, Source file: regularized.py
Example 11: HTFNull
def HTFNull(psr, F, proj, SS, A, f, gam, efac, equad):
    """
    Lentati marginalized likelihood function only including efac and equad
    and power law coefficients

    @param psr: Pulsar class
    @param F: Fourier design matrix constructed in PALutils
    @param proj: Projection operator from white noise
    @param SS: Diagonalized white noise matrix
    @param A: Power spectrum amplitude
    @param f: Frequencies at which to parameterize power spectrum (Hz)
    @param gam: Power spectrum index
    @param efac: constant multiplier on error bar covariance matrix term
    @param equad: Additional white noise added in quadrature to efac

    @return: LogLike: loglikelihood
    """
    diff = np.dot(proj, psr.res)

    # compute total time span of data
    Tspan = psr.toas.max() - psr.toas.min()

    # get power spectrum coefficients
    f1yr = 1 / 3.16e7
    rho = A ** 2 / 12 / np.pi ** 2 * f1yr ** (gam - 3) * f ** (-gam) / Tspan

    # compute d
    d = np.dot(F.T, diff / (efac * SS + equad ** 2))

    # compute Sigma
    N = 1 / (efac * SS + equad ** 2)
    right = (N * F.T).T
    FNF = np.dot(F.T, right)

    arr = np.zeros(2 * len(rho))
    ct = 0
    for ii in range(0, 2 * len(rho), 2):
        arr[ii] = rho[ct]
        arr[ii + 1] = rho[ct]
        ct += 1

    Phi = np.diag(10 ** arr)
    Sigma = FNF + np.diag(1 / arr)

    # cholesky decomp for second term in exponential
    cf = sl.cho_factor(Sigma)
    expval2 = sl.cho_solve(cf, d)
    logdet_Sigma = np.sum(2 * np.log(np.diag(cf[0])))

    dtNdt = np.sum(diff ** 2 / (efac * SS + equad ** 2))

    logdet_Phi = np.sum(np.log(arr))
    logdet_N = np.sum(np.log(efac * SS + equad ** 2))

    logLike = -0.5 * (logdet_N + logdet_Phi + logdet_Sigma) - 0.5 * (dtNdt - np.dot(d, expval2))

    return logLike
Developer: jellis18, Project: PAL, Lines of code: 60, Source file: HTFSingleEvolving.py
Example 12: __init__
def __init__(self,
             X,
             sigma,
             offset=None,
             quadratic=None,
             initial=None):
    """
    Parameters
    ----------

    X : np.ndarray
        Design matrix.

    sigma : float
        Known standard deviation.
    """
    rr.smooth_atom.__init__(self,
                            (X.shape[1],),
                            offset=offset,
                            quadratic=quadratic,
                            initial=initial)
    self.X = X
    self.sigma = sigma
    self._cholX = cho_factor(X.T.dot(X))
Developer: selective-inference, Project: merged, Lines of code: 27, Source file: estimation.py
Example 13: evaluate
def evaluate(self):
    '''
    Return the lnprob using the current version of the C_GP matrix, data matrix,
    and other intermediate products.
    '''
    self.lnprob_last = self.lnprob

    CC = self.data_mat
    model = self.chebyshevSpectrum.k * self.flux

    try:
        factor, flag = cho_factor(CC)
        R = self.fl - model
        logdet = np.sum(2 * np.log(np.diag(factor)))
        self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)
        self.logger.debug("Evaluating lnprob={}".format(self.lnprob))
        return self.lnprob
    # To give us some debugging information about what went wrong.
    except np.linalg.linalg.LinAlgError:
        print("Spectrum:", self.spectrum_id, "Order:", self.order)
        raise
Developer: EdGillen, Project: Starfish, Lines of code: 28, Source file: parallel_linear.py
Example 14: init
def init(self, p):
    self.datadir = os.path.join(os.path.dirname(__file__), 'spt_lps12_20120828')

    # Load spectrum and covariance
    fn = 'Spectrum_spt2500deg2_lps12%s.newdat' % ('_nocal' if ('spt_s12', 'a_calib') in p else '')
    with open(os.path.join(self.datadir, fn)) as f:
        while 'TT' not in f.readline(): pass
        self.spec = array([fromstring(f.readline(), sep=' ')[1] for _ in range(47)])
        self.sigma = array([fromstring(f.readline(), sep=' ') for _ in range(94)])[47:]

    # Load windows
    self.windows = [loadtxt(os.path.join(self.datadir, 'windows', 'window_lps12', 'window_%i' % i))[:, 1] for i in range(1, 48)]

    if ('spt_s12', 'lmin') in p:
        bmin = sum(1 for _ in takewhile(lambda x: x < p['spt_s12', 'lmin'],
                                        (sum(1 for _ in takewhile(lambda x: abs(x) < .001, w)) for w in self.windows)))
        self.spec = self.spec[bmin:]
        self.sigma = self.sigma[bmin:, bmin:]
        self.windows = self.windows[bmin:]

    self.errorbars = sqrt(diag(self.sigma))
    self.sigma = cho_factor(self.sigma)

    self.windowrange = (lambda x: slice(min(x), max(x) + 1))(loadtxt(os.path.join(self.datadir, 'windows', 'window_lps12', 'window_1'))[:, 0])
    self.lmax = self.windowrange.stop
    self.ells = array([dot(arange(10000)[self.windowrange], w) for w in self.windows])

    self.freq = {'dust': 154, 'radio': 151, 'tsz': 153}
    self.fluxcut = 50

    self.calib_prior = p.get(('spt_s12', 'calib_prior'), True)
Developer: marius311, Project: cosmoslik-uspype, Lines of code: 31, Source file: spt_s12.py
Example 15: mark1loglikelihood
def mark1loglikelihood(psr, Aw, Ar, Si):
    """
    Log-likelihood for our pulsar. This one does not marginalize
    over the timing model, so it cannot be used if the data has been
    'fit'. Use when creating data with 'dofit=False':

    psr = Pulsar(dofit=False)

    Calculate the covariance matrix in the time domain with:

    ll = -0.5 * res^{T} C^{-1} res - 0.5 * log(det(C))

    :param psr:
        pulsar object, containing the data and stuff

    :param Aw:
        White noise amplitude, model parameter

    :param Ar:
        Red noise amplitude, model parameter

    :param Si:
        Spectral index of red noise, model parameter
    """
    # The function that builds the non-diagonal covariance matrix is Cred_sec
    # Cov = Aw**2 * np.eye(len(psr.toas)) + \
    #         Ar**2 * Cred_sec(psr.toas, alpha=0.5*(3-Si))
    Cov = Aw ** 2 * np.eye(len(psr.toas)) + PL_covmat(psr.toas, Ar, alpha=0.5 * (3 - Si), fL=1.0 / (year * 20))

    cfC = sl.cho_factor(Cov)
    ldetC = 2 * np.sum(np.log(np.diag(cfC[0])))
    rCr = np.dot(psr.residuals, sl.cho_solve(cfC, psr.residuals))

    return -0.5 * rCr - 0.5 * ldetC - 0.5 * len(psr.residuals) * np.log(2 * np.pi)
Developer: nanograv, Project: cit-busyweek, Lines of code: 34, Source file: day4funcs.py
Example 16: safe_GP_inv
def safe_GP_inv(K, w):
    """
    Arguments
    ---------
    K, an NxN positive-definite matrix
    w, an N-vector

    Returns
    -------
    (K^-1 + diag(w))^-1
    and
    (1/2)*(ln|K^-1 + diag(w)| + ln|K|)
    and
    chol(K + diag(1./w))
    """
    N = w.size
    assert K.shape == (N, N)
    w_sqrt = np.sqrt(w)
    W_inv = np.diag(1. / w)
    W_sqrt_inv = np.diag(1. / w_sqrt)

    B = np.eye(N) + np.dot(w_sqrt[:, None], w_sqrt[None, :]) * K
    cho = linalg.cho_factor(B)
    T = linalg.cho_solve(cho, W_sqrt_inv)
    ret = W_inv - np.dot(W_sqrt_inv, T)

    return ret, np.sum(np.log(np.diag(cho[0]))), (np.dot(cho[0], W_sqrt_inv), cho[1])
Developer: Sura82, Project: colvb, Lines of code: 26, Source file: utilities.py
Example 17: update_kern_grads
def update_kern_grads(self):
    """
    Set the derivative of the lower bound wrt the (kernel) parameters
    """
    for i, kern in enumerate(self.kern):
        K = kern.K(self.X)
        B_inv = np.diag(1. / (self.phi[:, i] / self.variance))

        alpha = linalg.cho_solve(linalg.cho_factor(K + B_inv), self.Y)
        K_B_inv = pdinv(K + B_inv)[0]
        dL_dK = np.outer(alpha, alpha) - K_B_inv
        kern.update_gradients_full(dL_dK=dL_dK, X=self.X)

    # variance gradient
    grad_Lm_variance = 0.0
    for i, kern in enumerate(self.kern):
        K = kern.K(self.X)
        I = np.eye(self.N)

        B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance))
        alpha = np.linalg.solve(K + B_inv, self.Y)
        K_B_inv = pdinv(K + B_inv)[0]
        dL_dB = np.outer(alpha, alpha) - K_B_inv

        grad_B_inv = np.diag(1. / (self.phi[:, i] + 1e-6))

        grad_Lm_variance += 0.5 * np.trace(np.dot(dL_dB, grad_B_inv))

    self.variance.gradient = grad_Lm_variance
Developer: tomasgomes, Project: GPclust, Lines of code: 31, Source file: OMGP.py
Example 18: backward
def backward(self, x, tau):
    # (1 + tau A^T A)^-1 (x + tau A^T b)
    # which amounts to
    #   min_y ||A y - b||^2_F + tau * || y - x ||
    # TODO solve the dual when we have a fat matrix
    if hasattr(self.A, 'A') and type(self.A.A) is _np.ndarray:
        # self.A is a dense matrix:
        # we can pre-factorize the system using a Cholesky decomposition
        # and then quickly re-solve the system
        if self._solve_backward is None or self._solve_backward_tau != tau:
            from scipy.linalg import cho_factor, cho_solve
            A = self.A.A
            H = tau * A.T.dot(A) + _np.eye(A.shape[1])
            self._solve_backward = partial(cho_solve, cho_factor(H))
            self._solve_backward_tau = tau
        return self._solve_backward(x + tau * self.A.rmatvec(self.b))
    else:
        from scipy.sparse.linalg import lsqr, LinearOperator

        def matvec(y):
            return y + tau * self.A.rmatvec(self.A.matvec(y))

        x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = \
            lsqr(LinearOperator((self.A.shape[1], self.A.shape[1]), matvec, matvec),
                 x + tau * self.A.rmatvec(self.b))
        return x
Developer: swenger, Project: convexopt, Lines of code: 30, Source file: dataterm.py
Example 19: lnprob
def lnprob(cube, ndim, nparams):
    # Log-likelihood ratio for eccentric single-source detection.
    x = np.array([cube[ii] for ii in range(nparams)])

    if len(x) == 9:
        logmass, logdist, logorbfreq, gwphi, costheta, cosinc, gwpol, gwgamma, l0 = x
    else:
        logmass, logdist, logorbfreq, gwphi, costheta, cosinc, gwpol, gwgamma, l0, e0 = x

    mass = 10.0 ** logmass
    dist = 10.0 ** logdist
    orbfreq = 10.0 ** logorbfreq
    gwtheta = np.arccos(costheta)
    gwinc = np.arccos(cosinc)

    if len(x) == 9:
        nharm = 3
    else:
        nharm = int(keplersum[keplersum[:, 0] > e0][0][1]) + 1  # np.arange(1,int(keplersum[keplersum[:,0]>e0][0][1]),1)

    gwres = []
    sig_sub = []
    if len(x) == 9:
        for ii, p in enumerate(psr):
            gwres.append(createResiduals_ecc(p, gwtheta, gwphi, mass, dist, orbfreq, gwinc, gwpol, gwgamma, 0.001, l0, Nharm=nharm, psrTerm=False))
            sig_sub.append(p.res - gwres[ii])
    else:
        for ii, p in enumerate(psr):
            gwres.append(createResiduals_ecc(p, gwtheta, gwphi, mass, dist, orbfreq, gwinc, gwpol, gwgamma, e0, l0, Nharm=nharm, psrTerm=False))
            sig_sub.append(p.res - gwres[ii])

    d = []
    dtNdt = []
    logLike = 0.0
    for ii, p in enumerate(psr):
        errs = p.toaerrs

        d.append(np.dot(p.Te.T, sig_sub[ii] / (errs ** 2.0)))

        # triple product in likelihood function
        dtNdt.append(np.sum(sig_sub[ii] ** 2.0 / (errs ** 2.0)))

        loglike1 = -0.5 * (logdet_N[ii] + dtNdt[ii])

        # cholesky decomp for second term in exponential
        try:
            cf = sl.cho_factor(Sigma[ii])
            expval2 = sl.cho_solve(cf, d[ii])
            logdet_Sigma = np.sum(2 * np.log(np.diag(cf[0])))
        except np.linalg.LinAlgError:
            print('Cholesky decomposition failed a second time! Using SVD instead')
            u, s, v = sl.svd(Sigma[ii])
            expval2 = np.dot(v.T, 1 / s * np.dot(u.T, d[ii]))
            logdet_Sigma = np.sum(np.log(s))

        logLike += -0.5 * (logdet_Phi[ii] + logdet_Sigma) + 0.5 * (np.dot(d[ii], expval2)) + loglike1

    return logLike
Developer: jellis18, Project: NX01, Lines of code: 60, Source file: eccentric_BayesMnest_NanoSets.py
Example 20: negll
def negll(self, pv=None, flux=None, inputs=None, splits=None):
    flux = flux if flux is not None else self.data.masked_normalised_flux
    inputs = inputs if inputs is not None else self.data.masked_inputs
    K = self.compute_cmat(pv, inputs, inputs, splits=splits, add_wn=True)
    L = sla.cho_factor(K)
    b = sla.cho_solve(L, flux)
    return log(diag(L[0])).sum() + 0.5 * dot(flux, b)
Developer: Cadair, Project: k2sc, Lines of code: 7, Source file: segdetrender.py
Note: the scipy.linalg.cho_factor examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.