This article collects typical usage examples of the Python function scipy.linalg.pinvh. If you are wondering what pinvh does and how to use it, the curated code examples below should help.
The following 20 code examples of pinvh, drawn from open-source projects, are ordered by popularity.
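Before the project examples, here is a minimal self-contained sketch (not taken from any project below) of what pinvh computes: the Moore-Penrose pseudo-inverse of a symmetric (or Hermitian) matrix, obtained via an eigendecomposition rather than the general SVD used by scipy.linalg.pinv.

import numpy as np
from scipy.linalg import pinvh

rng = np.random.RandomState(0)
A = rng.randn(5, 3)
S = A.dot(A.T)        # symmetric positive semi-definite, rank 3, singular

S_pinv = pinvh(S)     # eigendecomposition-based pseudo-inverse

# agrees with the general SVD-based pseudo-inverse ...
assert np.allclose(S_pinv, np.linalg.pinv(S))
# ... and satisfies the Moore-Penrose condition S @ S+ @ S = S
assert np.allclose(S.dot(S_pinv).dot(S), S)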
Example 1: _update_precisions
def _update_precisions(self, X, z):
    """Update the variational distributions for the precisions"""
    n_features = X.shape[1]
    if self.covariance_type == 'spherical':
        self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
        for k in range(self.n_components):
            # could be more memory efficient ?
            sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
            self.scale_[k] = 1.
            self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
            self.bound_prec_[k] = (
                0.5 * n_features * (
                    digamma(self.dof_[k]) - np.log(self.scale_[k])))
        self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
    elif self.covariance_type == 'diag':
        for k in range(self.n_components):
            self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
            sq_diff = (X - self.means_[k]) ** 2  # see comment above
            self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                z.T[k], (sq_diff + 1))
            self.precs_[k] = self.dof_[k] / self.scale_[k]
            self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                               - np.log(self.scale_[k]))
            self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
    elif self.covariance_type == 'tied':
        self.dof_ = 2 + X.shape[0] + n_features
        self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
        for k in range(self.n_components):
            diff = X - self.means_[k]
            self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
        self.scale_ = pinvh(self.scale_)
        self.precs_ = self.dof_ * self.scale_
        self.det_scale_ = linalg.det(self.scale_)
        self.bound_prec_ = 0.5 * wishart_log_det(
            self.dof_, self.scale_, self.det_scale_, n_features)
        self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
    elif self.covariance_type == 'full':
        for k in range(self.n_components):
            sum_resp = np.sum(z.T[k])
            self.dof_[k] = 2 + sum_resp + n_features
            self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
            diff = X - self.means_[k]
            self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_[k] = pinvh(self.scale_[k])
            self.precs_[k] = self.dof_[k] * self.scale_[k]
            self.det_scale_[k] = linalg.det(self.scale_[k])
            self.bound_prec_[k] = 0.5 * wishart_log_det(
                self.dof_[k], self.scale_[k], self.det_scale_[k],
                n_features)
            self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                self.scale_[k])
Author: MechCoder | Project: scikit-learn | Lines: 54 | Source: dpgmm.py
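In the 'tied' and 'full' branches above, pinvh turns the accumulated scatter matrix into the posterior Wishart scale, and the expected precision is then dof_ * scale_, since the mean of a Wishart(dof, scale) distribution is dof * scale. A quick sampling-based sanity check of that identity (separate from the scikit-learn code above):

import numpy as np
from scipy.stats import wishart

dof = 7
scale = np.array([[2.0, 0.3],
                  [0.3, 1.0]])
samples = wishart.rvs(df=dof, scale=scale, size=50000, random_state=0)
# the empirical mean of Wishart draws approaches dof * scale
print(samples.mean(axis=0))   # roughly [[14.0, 2.1], [2.1, 7.0]]
print(dof * scale)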
Example 2: _init_params
def _init_params(self, X):
    '''
    Initialise parameters
    '''
    d = X.shape[1]
    # initialise prior on means & precision matrices
    if 'means' in self.init_params:
        means0 = self.init_params['means']
    else:
        kms = KMeans(n_init=self.n_init, n_clusters=self.n_components)
        means0 = kms.fit(X).cluster_centers_
    if 'covar' in self.init_params:
        scale_inv0 = self.init_params['covar']
        scale0 = pinvh(scale_inv0)
    else:
        # heuristics to define broad prior over precision matrix
        diag_els = np.abs(np.max(X, 0) - np.min(X, 0)) / 2
        scale_inv0 = np.diag(diag_els)
        scale0 = np.diag(1. / diag_els)
    if 'weights' in self.init_params:
        weights0 = self.init_params['weights']
    else:
        weights0 = np.ones(self.n_components) / self.n_components
    if 'dof' in self.init_params:
        dof0 = self.init_params['dof']
    else:
        dof0 = d
    if 'beta' in self.init_params:
        beta0 = self.init_params['beta']
    else:
        beta0 = 1e-3
    # clusters that are not pruned
    self.active = np.ones(self.n_components, dtype=bool)
    # checks initialisation errors in case parameters are user defined
    assert dof0 >= d, ('Degrees of freedom should be larger than '
                       'dimensionality of data')
    assert means0.shape[0] == self.n_components, ('Number of centroids defined '
                                                  'should be equal to number of '
                                                  'components')
    assert means0.shape[1] == d, ('Dimensionality of means and data '
                                  'should be the same')
    assert weights0.shape[0] == self.n_components, ('Number of weights should be '
                                                    'equal to number of components')
    # At first iteration these parameters are equal to priors, but they change
    # at each iteration of mean field approximation
    scale = np.array([np.copy(scale0) for _ in range(self.n_components)])
    means = np.copy(means0)
    weights = np.copy(weights0)
    dof = dof0 * np.ones(self.n_components)
    beta = beta0 * np.ones(self.n_components)
    init_ = [means0, scale0, scale_inv0, beta0, dof0, weights0]
    iter_ = [means, scale, scale_inv0, beta, dof, weights]
    return init_, iter_
Author: AmazaspShumik | Project: sklearn-bayes | Lines: 60 | Source: mixture.py
Example 3: fit
def fit(self, X=None, y=None):
    """
    The Gaussian Process model fitting method.

    Parameters
    ----------
    X : double array_like, optional
        An array with shape (n_samples, n_features) with the input at which
        observations were made.
    y : array_like, shape (n_samples, 3)
        An array with the observations of the output to be predicted.

    Returns
    -------
    gp : self
        A fitted Gaussian Process model object awaiting data to perform
        predictions.
    """
    if X is not None:
        K_list = self.calc_scalar_kernel_matrices(X)
    else:
        K_list = self.calc_scalar_kernel_matrices()
    # add diagonal noise to each scalar kernel matrix
    K_list = [K + self.nugget * sp.eye(K.shape[0]) for K in K_list]
    Kglob = None
    for K, ivs, iv_corr in zip(K_list, self.ivs, self.iv_corr):
        # make the outer product tensor of shape (N_ls, N_ls, 3, 3) and
        # multiply it with the scalar kernel
        K3D = iv_corr * K[:, :, None, None] * rotmat_multi(ivs, ivs)
        if Kglob is None:
            Kglob = K3D
        else:
            Kglob += K3D
    # reshape tensor onto a 2D array tiled with 3x3 matrix blocks, so that
    # all channels are merged into one covariance matrix:
    # K^{glob}_{ij} = \sum_{k = 1}^{N_{IVs}} w_k D_{k, ij} |v_k^i\rangle \langle v_k^j |
    Kglob = my_tensor_reshape(Kglob)
    try:
        inv = LA.pinv2(Kglob)
    except LA.LinAlgError as err:
        print("pinv2 failed: %s. Switching to pinvh" % err)
        try:
            inv = LA.pinvh(Kglob)
        except LA.LinAlgError as err:
            print("pinvh also failed: %s" % err)
            inv = None
    # alpha is the vector of regression coefficients of GaussianProcess
    alpha = sp.dot(inv, self.y.ravel())
    if not self.low_memory:
        self.inverse = inv
        self.Kglob = Kglob
    self.alpha = sp.array(alpha)
Author: marcocaccin | Project: MarcoGP | Lines: 60 | Source: forcegp_module.py
Example 4: __init__
def __init__(self, xs, ys, noise=0.001, l=1, K=K_SE):
    self.xs = xs
    self.l = l
    self.K = K
    Kxx = self.K(xs, l=self.l)
    self.KxxI = pinvh(Kxx + (noise**2) * eye_like(Kxx))
    self.KxxI_ys = self.KxxI.dot(ys)
Author: davidar | Project: gpo | Lines: 7 | Source: gp.py
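With (Kxx + noise^2 I)^-1 precomputed by pinvh, a posterior-mean prediction reduces to one matrix-vector product. A hypothetical predict method in the same style; the two-argument cross-covariance call K(xs_new, xs, l=...) is an assumed signature, not part of the original class:

def predict(self, xs_new):
    # cross-covariance between test and training inputs (assumed kernel signature)
    Kxn = self.K(xs_new, self.xs, l=self.l)
    return Kxn.dot(self.KxxI_ys)   # posterior mean: K* (Kxx + noise^2 I)^-1 y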
Example 5: fit
def fit(self, evidence_approx_method="fixed-point", max_iter=100):
    '''
    Fits Bayesian linear regression; returns posterior mean and precision
    of parameters.

    Parameters:
    -----------
    max_iter: int
        Maximum number of iterations
    evidence_approx_method: str (DEFAULT = 'fixed-point')
        Method for approximating evidence, either 'fixed-point' or 'EM'

    Theory Note:
    ------------
    This code implements two methods to fit type II ML Bayesian Linear
    Regression: Expectation Maximization and Fixed Point Iterations.
    Expectation Maximization is generally slower, so fixed-point is the
    default.
    '''
    # use type II maximum likelihood to find hyperparameters alpha and beta
    self._evidence_approx(max_iter=max_iter, method=evidence_approx_method)
    # find parameters of posterior distribution after last update of alpha & beta
    self.w_mu, self.w_precision = self._posterior_params(self.alpha, self.beta)
    self.D = pinvh(self.w_precision)
Author: fliem | Project: Bayesian-Regression-Methods | Lines: 25 | Source: bayesian_regression.py
Example 6: test_simple_complex
def test_simple_complex(self):
    a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) +
         1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
    a = np.dot(a, a.conj().T)  # Hermitian
    a_pinv = pinvh(a)
    assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
Author: metamorph-inc | Project: meta-core | Lines: 7 | Source: test_basic.py
Example 7: nll
def nll(l):  # negative log likelihood
    Kxx = K(xs, l=l)
    Kxx += (noise**2) * eye_like(Kxx)
    res = (ys.T).dot(pinvh(Kxx)).dot(ys) + slogdet(Kxx)[1]
    return squeeze(res)
Author: davidar | Project: gpo | Lines: 8 | Source: gp.py
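A closure like nll is typically handed straight to an optimizer to choose the length scale. For instance, a bounded scalar search (the bounds here are illustrative, not from the original project):

from scipy.optimize import minimize_scalar

res = minimize_scalar(nll, bounds=(1e-3, 1e2), method='bounded')
best_l = res.x   # length scale that maximizes the marginal likelihood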
Example 8: test_nonpositive
def test_nonpositive(self):
    a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
    a = np.dot(a, a.T)
    u, s, vt = np.linalg.svd(a)
    s[0] *= -1
    a = np.dot(u * s, vt)  # a is now symmetric, non-positive and singular
    a_pinv = pinv2(a)
    a_pinvh = pinvh(a)
    assert_array_almost_equal(a_pinv, a_pinvh)
Author: 7924102 | Project: scipy | Lines: 9 | Source: test_basic.py
Example 9: laplacian_sc_pinv
def laplacian_sc_pinv(G, observed_nodelist, unobserved_nodelist,
                      weight='weight'):
    """
    Pseudo-inverse of the Laplacian Schur complement.
    """
    sc = laplacian_schur_complement(
        G, observed_nodelist, unobserved_nodelist, weight=weight)
    return pinvh(sc)
Author: argriffing | Project: fiedlerology | Lines: 9 | Source: linalg.py
Example 10: error_matrix
def error_matrix(self):
    """
    Covariance matrix of the estimates: the negative (pseudo-)inverse of
    the flattened Hessian, with the original mask re-applied.
    """
    try:
        mask = self.flat_hess_.mask
    except AttributeError:
        mask = None
    return self._reshape_matrix(
        -np.ma.array(pinvh(self.flat_hess_.data), mask=mask))
Author: jpceia | Project: maxlike | Lines: 10 | Source: maxlike_base.py
Example 11: update_sigma
def update_sigma(X, alpha_, lambda_, keep_lambda, n_samples):
    # posterior covariance via the Woodbury identity: invert the
    # (n_samples, n_samples) matrix with pinvh instead of the potentially
    # much larger (n_features, n_features) precision matrix
    sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                   np.dot(X[:, keep_lambda] *
                          np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                          X[:, keep_lambda].T))
    sigma_ = np.dot(sigma_, X[:, keep_lambda] *
                    np.reshape(1. / lambda_[keep_lambda], [1, -1]))
    sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
                      X[:, keep_lambda].T, sigma_)
    sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
    return sigma_
Author: allefpablo | Project: scikit-learn | Lines: 11 | Source: bayes.py
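The Woodbury route pays off when the retained features outnumber the samples. A quick check on synthetic data that update_sigma (as defined above) matches the direct inverse of the retained-feature precision matrix:

import numpy as np
from scipy.linalg import pinvh

rng = np.random.RandomState(0)
n_samples, n_features = 10, 25
X = rng.randn(n_samples, n_features)
alpha_ = 2.0
lambda_ = rng.uniform(0.5, 3.0, n_features)
keep_lambda = lambda_ < 2.0

sigma_w = update_sigma(X, alpha_, lambda_, keep_lambda, n_samples)
# direct inversion of the posterior precision over the kept features
Xk = X[:, keep_lambda]
sigma_direct = pinvh(np.diag(lambda_[keep_lambda]) + alpha_ * np.dot(Xk.T, Xk))
assert np.allclose(sigma_w, sigma_direct)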
Example 12: nll_prime
def nll_prime(l):
    Kxx, Kps = K(xs, l=l, deriv=True)
    Kxx += (noise**2) * eye_like(Kxx)
    KxxI = pinvh(Kxx)
    a = KxxI.dot(ys)
    aaT = outer(a, a)      # a . a^T
    KI_aaT = KxxI - aaT    # K^-1 - a.a^T
    res = []
    for Kp in Kps:
        grad = trace_prod(KI_aaT, Kp)
        res.append(grad)
    return asarray(res)
Author: davidar | Project: gpo | Lines: 12 | Source: gp.py
Example 13: _init_params
def _init_params(self, *args):
    '''
    Initialise parameters of Bayesian Gaussian HMM
    '''
    d, X = args
    pr_start, pr_trans = super(VBGaussianHMM, self)._init_params()
    # initialise prior on means & precision matrices;
    # if user did not define initialisation parameters, use KMeans
    if 'means' in self.init_params:
        means0 = check_array(self.init_params['means'])
    else:
        kms = KMeans(n_init=2, n_clusters=self.n_hidden)
        means0 = kms.fit(X).cluster_centers_
    if 'covar' in self.init_params:
        scale_inv0 = self.init_params['covar']
        scale0 = pinvh(scale_inv0)
    else:
        # heuristics to define broad prior over precision matrix
        diag_els = np.abs(np.max(X, 0) - np.min(X, 0))
        scale_inv0 = np.diag(diag_els)
        scale0 = np.diag(1. / diag_els)
    if 'dof' in self.init_params:
        dof0 = self.init_params['dof']
    else:
        dof0 = d
    if 'beta' in self.init_params:
        beta0 = self.init_params['beta']
    else:
        beta0 = 1e-3
    # checks initialisation errors in case parameters are user defined
    if dof0 < d:
        raise ValueError('Degrees of freedom should be larger than '
                         'dimensionality of data')
    if means0.shape[0] != self.n_hidden:
        raise ValueError('Number of centroids defined should '
                         'be equal to number of components')
    if means0.shape[1] != d:
        raise ValueError('Dimensionality of means and data '
                         'should be the same')
    scale = np.array([np.copy(scale0) for _ in range(self.n_hidden)])
    dof = dof0 * np.ones(self.n_hidden)
    beta = beta0 * np.ones(self.n_hidden)
    return pr_start, pr_trans, {'means': means0, 'scale': scale, 'beta': beta,
                                'dof': dof, 'scale_inv0': scale_inv0}
Author: Ferrine | Project: sklearn-bayes | Lines: 51 | Source: hmm.py
Example 14: get_precision
def get_precision(self):
    """Getter for the precision matrix.

    Returns
    -------
    precision_ : array-like
        The precision matrix associated to the current covariance object.
    """
    if self.store_precision:
        precision = self.precision_
    else:
        precision = linalg.pinvh(self.covariance_)
    return precision
Author: adykstra | Project: mne-python | Lines: 14 | Source: fixes.py
Example 15: _update_params
def _update_params(self, Nk, Xk, Sk, beta0, means0, dof0, scale_inv0,
                   beta, means, dof, scale):
    ''' Updates distribution of means and precisions '''
    for k in range(self.n_active):
        # update mean and precision for each cluster
        beta[k] = beta0 + Nk[k]
        means[k] = (beta0 * means0[k, :] + Xk[k]) / beta[k]
        dof[k] = dof0 + Nk[k] + 1
        # precision calculation is ugly but prevents overflow & underflow
        scale[k, :, :] = pinvh(scale_inv0 + (beta0 * Sk[k] + Nk[k] * Sk[k] -
                               np.outer(Xk[k], Xk[k]) -
                               beta0 * np.outer(means0[k, :] - Xk[k], means0[k, :])) /
                               (beta0 + Nk[k]))
    return beta, means, dof, scale
Author: AmazaspShumik | Project: sklearn-bayes | Lines: 14 | Source: mixture.py
Example 16: fit
def fit(self, X, y=None):
    """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    y : not used, present for API consistency.

    Returns
    -------
    self : object
    """
    X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
    random_state = check_random_state(self.random_state)
    n_samples, n_features = X.shape
    # check that the empirical covariance is full rank
    if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
        warnings.warn("The covariance matrix associated to your dataset "
                      "is not full rank")
    # compute and store raw estimates
    raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
        X, support_fraction=self.support_fraction,
        cov_computation_method=self._nonrobust_covariance,
        random_state=random_state)
    if self.assume_centered:
        raw_location = np.zeros(n_features)
        raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                    assume_centered=True)
        # get precision matrix in an optimized way
        precision = linalg.pinvh(raw_covariance)
        raw_dist = np.sum(np.dot(X, precision) * X, 1)
    self.raw_location_ = raw_location
    self.raw_covariance_ = raw_covariance
    self.raw_support_ = raw_support
    self.location_ = raw_location
    self.support_ = raw_support
    self.dist_ = raw_dist
    # obtain consistency at normal models
    self.correct_covariance(X)
    # re-weight estimator
    self.reweight_covariance(X)
    return self
Author: kjacks21 | Project: scikit-learn | Lines: 48 | Source: robust_covariance.py
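The line raw_dist = np.sum(np.dot(X, precision) * X, 1) above is a vectorized squared Mahalanobis distance for centered data: it computes only the diagonal of X . precision . X^T instead of the full matrix. A standalone illustration of the equivalence (synthetic data, not MinCovDet internals):

import numpy as np
from scipy.linalg import pinvh

rng = np.random.RandomState(0)
X = rng.randn(100, 4)
Xc = X - X.mean(axis=0)
precision = pinvh(np.cov(X, rowvar=False))

dist_fast = np.sum(np.dot(Xc, precision) * Xc, axis=1)
dist_naive = np.array([x.dot(precision).dot(x) for x in Xc])
assert np.allclose(dist_fast, dist_naive)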
Example 17: _vbm_emission_params
def _vbm_emission_params(self, emission_params_prior, emission_params, sf_stats):
    '''
    Performs VBM step for parameters of emission probabilities
    '''
    Nk, Xk, Sk = sf_stats
    beta0, means0 = emission_params_prior['beta'], emission_params_prior['means']
    emission_params['beta'] = beta0 + Nk
    emission_params['means'] = ((beta0 * means0.T + Xk.T) / emission_params['beta']).T
    emission_params['dof'] = emission_params_prior['dof'] + Nk + 1
    scale_inv0 = emission_params_prior['scale_inv0']
    for k in range(self.n_hidden):
        emission_params['scale'][k] = pinvh(scale_inv0 + (beta0 * Sk[k] + Nk[k] * Sk[k] -
                                            np.outer(Xk[k], Xk[k]) -
                                            beta0 * np.outer(means0[k] - Xk[k], means0[k])) /
                                            (beta0 + Nk[k]))
    return emission_params
Author: Ferrine | Project: sklearn-bayes | Lines: 16 | Source: hmm.py
Example 18: inversion_checker
def inversion_checker(X, alpha, beta):
    '''
    Checks accuracy of inversion
    '''
    n, m = X.shape
    u, d, vh = np.linalg.svd(X, full_matrices=False)
    dsq = d**2
    # precision matrix
    S = beta * np.dot(X.T, X) + alpha * np.eye(m)
    # inverting precision: previous version (explicit SVD-based inverse)
    a1 = np.dot(np.dot(vh.T, np.diag(1. / (beta * dsq + alpha))), vh)
    # inverting precision: current version
    a2 = pinvh(S)
    return [a1, a2]
Author: fliem | Project: Bayesian-Regression-Methods | Lines: 16 | Source: precision_inversion_tester.py
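A possible way to exercise the checker: with X = U diag(d) V^T, the SVD route V diag(1/(beta*d^2 + alpha)) V^T is exactly (beta*X^T X + alpha*I)^-1, so on a well-conditioned problem both routes should agree (random data for illustration):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 8)
a1, a2 = inversion_checker(X, alpha=1.0, beta=2.0)
assert np.allclose(a1, a2)   # SVD-based inverse and pinvh agree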
Example 19: test_bayesian_ridge_score_values
def test_bayesian_ridge_score_values():
    """Check value of score on toy example.

    Compute log marginal likelihood with equation (36) in Sparse Bayesian
    Learning and the Relevance Vector Machine (Tipping, 2001):

    - 0.5 * (log |Id/alpha + X.X^T/lambda| +
             y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
    + lambda_1 * log(lambda) - lambda_2 * lambda
    + alpha_1 * log(alpha) - alpha_2 * alpha

    and check equality with the score computed during training.
    """
    X, y = diabetes.data, diabetes.target
    n_samples = X.shape[0]
    # check with initial values of alpha and lambda (see code for the values)
    eps = np.finfo(np.float64).eps
    alpha_ = 1. / (np.var(y) + eps)
    lambda_ = 1.
    # value of the parameters of the Gamma hyperpriors
    alpha_1 = 0.1
    alpha_2 = 0.1
    lambda_1 = 0.1
    lambda_2 = 0.1
    # compute score using formula of docstring
    score = lambda_1 * log(lambda_) - lambda_2 * lambda_
    score += alpha_1 * log(alpha_) - alpha_2 * alpha_
    M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)
    M_inv = pinvh(M)
    score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +
                      n_samples * log(2 * np.pi))
    # compute score with BayesianRidge
    clf = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,
                        lambda_1=lambda_1, lambda_2=lambda_2,
                        n_iter=1, fit_intercept=False, compute_score=True)
    clf.fit(X, y)
    assert_almost_equal(clf.scores_[0], score, decimal=9)
Author: allefpablo | Project: scikit-learn | Lines: 42 | Source: test_bayes.py
Example 20: _set_covariance
def _set_covariance(self, covariance):
    """Saves the covariance and precision estimates.

    Storage is done according to `self.store_precision`.
    Precision is stored only if invertible.

    Parameters
    ----------
    covariance : 2D ndarray, shape (n_features, n_features)
        Estimated covariance matrix to be stored, and from which precision
        is computed.
    """
    # covariance = check_array(covariance)
    # set covariance
    self.covariance_ = covariance
    # set precision
    if self.store_precision:
        self.precision_ = linalg.pinvh(covariance)
    else:
        self.precision_ = None
Author: adykstra | Project: mne-python | Lines: 21 | Source: fixes.py
Note: the scipy.linalg.pinvh examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects; copyright remains with the original authors, and any use or redistribution is subject to the license of the corresponding project. Do not reproduce without permission.