This article collects typical usage examples of the Python function sklearn.utils.extmath.pinvh. If you have been wondering what pinvh does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Twenty pinvh code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
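Before diving in, a minimal sketch of what pinvh does may be useful: it computes the Moore-Penrose pseudo-inverse of a symmetric (Hermitian) matrix via an eigendecomposition, which is cheaper and numerically safer than a general pinv when symmetry is guaranteed. Note that pinvh has since been removed from sklearn.utils.extmath; the sketch below assumes a modern environment where scipy.linalg.pinvh is the drop-in replacement.

import numpy as np
from scipy.linalg import pinvh  # older scikit-learn re-exported this as sklearn.utils.extmath.pinvh

# build a symmetric, rank-deficient (hence singular) matrix
a = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
s = np.dot(a, a.T)

s_pinv = pinvh(s)

# the pseudo-inverse satisfies the Moore-Penrose identity S S+ S == S
print(np.allclose(np.dot(np.dot(s, s_pinv), s), s))  # True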
Example 1: fit
def fit(self, balance_param=0.5, sparsity_param=0.01, verbose=False):
    '''
    balance_param: trades off between sparsity and M0 prior
    sparsity_param: trades off between optimizer and sparseness (see graph_lasso)
    '''
    P = pinvh(self.M) + balance_param * self.loss_matrix
    emp_cov = pinvh(P)
    # hack: ensure positive semidefinite
    emp_cov = emp_cov.T.dot(emp_cov)
    self.M, _ = graph_lasso(emp_cov, sparsity_param, verbose=verbose)
Developer: ChihChengLiang, Project: metric_learn, Lines: 10, Source: sdml.py
Example 2: fit
def fit(self, X, W, verbose=False):
    """
    X: data matrix, (n x d)
    W: connectivity graph, (n x n). +1 for positive pairs, -1 for negative.
    """
    self._prepare_inputs(X, W)
    P = pinvh(self.M) + self.balance_param * self.loss_matrix
    emp_cov = pinvh(P)
    # hack: ensure positive semidefinite
    emp_cov = emp_cov.T.dot(emp_cov)
    self.M, _ = graph_lasso(emp_cov, self.sparsity_param, verbose=verbose)
    return self
Developer: martin-prillard, Project: metric_learn, Lines: 12, Source: sdml.py
Example 3: fit
def fit(self, X, W):
    """
    X: data matrix, (n x d)
       each row corresponds to a single instance
    W: connectivity graph, (n x n). +1 for positive pairs, -1 for negative.
    """
    self._prepare_inputs(X, W)
    P = pinvh(self.M) + self.params['balance_param'] * self.loss_matrix
    emp_cov = pinvh(P)
    # hack: ensure positive semidefinite
    emp_cov = emp_cov.T.dot(emp_cov)
    self.M, _ = graph_lasso(emp_cov, self.params['sparsity_param'],
                            verbose=self.params['verbose'])
    return self
Developer: Shivamagrawal2014, Project: metric-learn, Lines: 14, Source: sdml.py
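Examples 1 to 3 are variants of the same SDML recipe: invert the current metric M, add the weighted loss term, invert back to get a pseudo-covariance, square it to force positive semidefiniteness, and hand the result to a sparse inverse-covariance solver. A minimal sketch of that last step, assuming a recent scikit-learn where graph_lasso has been renamed graphical_lasso (alpha plays the role of sparsity_param above):

import numpy as np
from sklearn.covariance import graphical_lasso  # formerly sklearn.covariance.graph_lasso

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
emp_cov = np.cov(X, rowvar=False)

# larger alpha -> sparser estimated precision matrix
covariance, precision = graphical_lasso(emp_cov, alpha=0.1)
print(np.round(precision, 2))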
Example 4: correct_covariance
def correct_covariance(self, data, method=None):
    """Apply a correction to raw Minimum Covariance Determinant estimates.

    Correction using the empirical correction factor suggested
    by Rousseeuw and Van Driessen in [Rouseeuw1984]_.

    Parameters
    ----------
    data: array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.
        The data set must be the one which was used to compute
        the raw estimates.

    Returns
    -------
    covariance_corrected: array-like, shape (n_features, n_features)
        Corrected robust covariance estimate.
    """
    # string comparison with `is` is unreliable; use `==`
    if method == "empirical":
        X_c = data - self.raw_location_
        dist = np.sum(
            np.dot(X_c, pinvh(self.raw_covariance_)) * X_c, 1)
        correction = np.median(dist) / sp.stats.chi2(
            data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
    elif method == "theoretical":
        n, p = data.shape
        c = sp.stats.chi2(p + 2).cdf(sp.stats.chi2(p).ppf(self.h)) / self.h
        covariance_corrected = self.raw_covariance_ * c
    else:
        covariance_corrected = self.raw_covariance_
    self._set_covariance(covariance_corrected)
    return covariance_corrected
Developer: VirgileFritsch, Project: outliers, Lines: 35, Source: mcd.py
Example 5: _posterior_dist
def _posterior_dist(self, A, beta, XX, XY, full_covar=False):
    '''
    Calculates mean and covariance matrix of posterior distribution
    of coefficients.
    '''
    # compute precision matrix for active features
    Sinv = beta * XX
    np.fill_diagonal(Sinv, np.diag(Sinv) + A)
    cholesky = True
    # try cholesky, if it fails fall back to pinvh
    try:
        # find posterior mean : R*R.T*mean = beta*X.T*Y
        # solve(R*z = beta*X.T*Y) => find z => solve(R.T*mean = z) => find mean
        R = np.linalg.cholesky(Sinv)
        Z = solve_triangular(R, beta * XY, check_finite=False, lower=True)
        Mn = solve_triangular(R.T, Z, check_finite=False, lower=False)
        # invert lower triangular matrix from cholesky decomposition
        Ri = solve_triangular(R, np.eye(A.shape[0]), check_finite=False, lower=True)
        if full_covar:
            Sn = np.dot(Ri.T, Ri)
            return Mn, Sn, cholesky
        else:
            return Mn, Ri, cholesky
    except LinAlgError:
        cholesky = False
        Sn = pinvh(Sinv)
        # the posterior mean needs the covariance Sn here, not the precision Sinv
        Mn = beta * np.dot(Sn, XY)
        return Mn, Sn, cholesky
Developer: AmazaspShumik, Project: sklearn-bayes, Lines: 29, Source: fast_rvm.py
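The Cholesky-first, pinvh-fallback pattern above is worth isolating: for a well-conditioned symmetric positive definite precision matrix, a Cholesky factorization plus two triangular solves is cheaper and more stable than forming an explicit inverse, and pinvh only steps in when the factorization fails. A minimal sketch of the equivalence on made-up data (the names Sinv and b are illustrative, not from the source):

import numpy as np
from scipy.linalg import solve_triangular, pinvh

rng = np.random.RandomState(0)
A = rng.randn(4, 4)
Sinv = np.dot(A, A.T) + 4 * np.eye(4)  # symmetric positive definite "precision"
b = rng.randn(4)

# route 1: Cholesky + two triangular solves (the try-branch above)
R = np.linalg.cholesky(Sinv)                    # Sinv = R @ R.T, R lower triangular
z = solve_triangular(R, b, lower=True)          # solve R z = b
x_chol = solve_triangular(R.T, z, lower=False)  # solve R.T x = z

# route 2: explicit pseudo-inverse (the except-branch above)
x_pinv = np.dot(pinvh(Sinv), b)

print(np.allclose(x_chol, x_pinv))  # True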
Example 6: mutual_incoherence
def mutual_incoherence(X_relevant, X_irelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    projector = np.dot(
        np.dot(X_irelevant.T, X_relevant),
        pinvh(np.dot(X_relevant.T, X_relevant))
    )
    return np.max(np.abs(projector).sum(axis=1))
Developer: Honglang, Project: scikit-learn, Lines: 8, Source: plot_sparse_recovery.py
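A hedged usage sketch of the helper above on synthetic data (the split into relevant and irrelevant columns is made up): values well below 1 correspond to the classical sufficient condition for sparse support recovery.

import numpy as np
from scipy.linalg import pinvh

def mutual_incoherence(X_relevant, X_irelevant):
    projector = np.dot(np.dot(X_irelevant.T, X_relevant),
                       pinvh(np.dot(X_relevant.T, X_relevant)))
    return np.max(np.abs(projector).sum(axis=1))

rng = np.random.RandomState(0)
X = rng.randn(200, 10)
# independent Gaussian columns yield incoherence well below 1
print(mutual_incoherence(X[:, :3], X[:, 3:]))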
Example 7: _log_multivariate_normal_density_tied
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model"""
    n_samples, n_dim = X.shape
    icv = pinvh(covars)
    lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
                  + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
                  - 2 * np.dot(np.dot(X, icv), means.T)
                  + np.sum(means * np.dot(means, icv), 1))
    return lpr
Developer: AdityaTewari, Project: hmmlearn, Lines: 9, Source: fixes.py
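A hedged sanity check of the expansion above: without the 0.1 added to the determinant (presumably a guard against singular covariances), the quadratic-form algebra should reproduce scipy.stats.multivariate_normal exactly. The self-contained sketch below drops that jitter to make the comparison exact; everything else mirrors the helper.

import numpy as np
from scipy import linalg
from scipy.linalg import pinvh
from scipy.stats import multivariate_normal

def log_density_tied(X, means, covars):
    n_samples, n_dim = X.shape
    icv = pinvh(covars)
    return -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars))
                   + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
                   - 2 * np.dot(np.dot(X, icv), means.T)
                   + np.sum(means * np.dot(means, icv), 1))

rng = np.random.RandomState(0)
X = rng.randn(5, 3)
means = rng.randn(2, 3)   # two states sharing one (tied) covariance
covars = 2.0 * np.eye(3)

ref = np.column_stack([multivariate_normal(m, covars).logpdf(X) for m in means])
print(np.allclose(log_density_tied(X, means, covars), ref))  # True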
Example 8: test_pinvh_nonpositive
def test_pinvh_nonpositive():
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
    a = np.dot(a, a.T)
    u, s, vt = np.linalg.svd(a)
    s[0] *= -1
    a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
    a_pinv = pinv2(a)
    a_pinvh = pinvh(a)
    assert_almost_equal(a_pinv, a_pinvh)
Developer: 0664j35t3r, Project: scikit-learn, Lines: 9, Source: test_utils.py
Example 9: objective_function
def objective_function(self, data, location, covariance):
    """Objective function minimized at each step of the MCD algorithm.
    """
    precision = pinvh(covariance)
    det = fast_logdet(precision)
    trace = np.trace(
        np.dot(empirical_covariance(data - location, assume_centered=True),
               precision))
    pen = self.shrinkage * np.trace(precision)
    return -det + trace + pen
Developer: VirgileFritsch, Project: outliers, Lines: 10, Source: rmcd.py
Example 10: _prepare_inputs
def _prepare_inputs(self, X, W):
    self.X_ = X = check_array(X)
    W = check_array(W, accept_sparse=True)
    # set up prior M
    if self.use_cov:
        self.M_ = pinvh(np.cov(X, rowvar=False))
    else:
        self.M_ = np.identity(X.shape[1])
    L = laplacian(W, normed=False)
    return X.T.dot(L.dot(X))
Developer: svecon, Project: metric-learn, Lines: 10, Source: sdml.py
Example 11: _condition
def _condition(self, i1, i2, X):
    cov_12 = self.covariance[np.ix_(i1, i2)]
    cov_11 = self.covariance[np.ix_(i1, i1)]
    cov_22 = self.covariance[np.ix_(i2, i2)]
    prec_22 = pinvh(cov_22)
    regression_coeffs = cov_12.dot(prec_22)

    if X.ndim == 2:
        mean = self.mean[i1] + regression_coeffs.dot(
            (X - self.mean[i2]).T).T
    elif X.ndim == 1:
        mean = self.mean[i1] + regression_coeffs.dot(X - self.mean[i2])
    else:
        raise ValueError("%d dimensions are not allowed for X!" % X.ndim)

    covariance = cov_11 - regression_coeffs.dot(cov_12.T)
    return mean, covariance
Developer: HRZaheri, Project: gmr, Lines: 16, Source: mvn.py
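The regression-coefficient trick above is the standard Gaussian conditioning formula: the conditional mean is mu_1 + Sigma_12 Sigma_22^-1 (x - mu_2), and the conditional covariance is Sigma_11 - Sigma_12 Sigma_22^-1 Sigma_21. A minimal standalone sketch with made-up 2-D numbers:

import numpy as np
from scipy.linalg import pinvh

mean = np.array([0.0, 1.0])
covariance = np.array([[1.0, 0.8],
                       [0.8, 2.0]])

i1, i2 = np.array([0]), np.array([1])   # condition dimension 0 on dimension 1
cov_12 = covariance[np.ix_(i1, i2)]
cov_11 = covariance[np.ix_(i1, i1)]
cov_22 = covariance[np.ix_(i2, i2)]

regression_coeffs = cov_12.dot(pinvh(cov_22))  # 0.8 / 2.0 = 0.4
x = np.array([2.0])                            # observed value of dimension 1
cond_mean = mean[i1] + regression_coeffs.dot(x - mean[i2])
cond_cov = cov_11 - regression_coeffs.dot(cov_12.T)
print(cond_mean, cond_cov)  # [0.4] [[0.68]]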
Example 12: fit
def fit(self, X, y=None):
    """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    y: not used, present for API consistency purposes.

    Returns
    -------
    self: object
        Returns self.
    """
    n_samples, n_features = X.shape
    # check that the empirical covariance is full rank
    if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
        warnings.warn("The covariance matrix associated to your dataset "
                      "is not full rank")
    # compute and store raw estimates
    raw_location, raw_covariance, raw_support = fast_mcd(
        X, objective_function=self.objective_function,
        h=self.h, cov_computation_method=self._nonrobust_covariance)
    if self.h is None:
        self.h = int(np.ceil(0.5 * (n_samples + n_features + 1))) \
            / float(n_samples)
    if self.assume_centered:
        raw_location = np.zeros(n_features)
        raw_covariance = self._nonrobust_covariance(
            X[raw_support], assume_centered=True)
    # get precision matrix in an optimized way
    precision = pinvh(raw_covariance)
    raw_dist = np.sum(np.dot(X, precision) * X, 1)
    self.raw_location_ = raw_location
    self.raw_covariance_ = raw_covariance
    self.raw_support_ = raw_support
    self.location_ = raw_location
    self.support_ = raw_support
    self.dist_ = raw_dist
    # obtain consistency at normal models
    self.correct_covariance(X)
    return self
Developer: VirgileFritsch, Project: outliers, Lines: 45, Source: mcd.py
Example 13: launch_rmcdl1_on_dataset
def launch_rmcdl1_on_dataset(n_samples, n_features, n_outliers):
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    # compute RMCD by fitting an object
    rmcd_fit = RMCDl1().fit(data)
    T = rmcd_fit.location_
    S = rmcd_fit.covariance_
    # compare with the true location and precision
    error_location = np.mean(T ** 2)
    assert(error_location < 1.)
    error_cov = np.mean((np.eye(n_features) - pinvh(S)) ** 2)
    assert(error_cov < 1.)
Developer: VirgileFritsch, Project: outliers, Lines: 21, Source: test_rmcd.py
Example 14: _posterior_dist
def _posterior_dist(self, X, y, A, intercept_prior):
    '''
    Uses Laplace approximation for calculating posterior distribution
    '''
    if self.solver == 'lbfgs_b':
        f = lambda w: _logistic_cost_grad(X, y, w, A, intercept_prior)
        w_init = np.random.random(X.shape[1])
        Mn = fmin_l_bfgs_b(f, x0=w_init, pgtol=self.tol_solver,
                           maxiter=self.n_iter_solver)[0]
        Xm = np.dot(X, Mn)
        s = expit(Xm)
        B = logistic._pdf(Xm)  # avoids underflow
        S = np.dot(X.T * B, X)
        np.fill_diagonal(S, np.diag(S) + A)
        t_hat = Xm + (y - s) / B
        Sn = pinvh(S)
    elif self.solver == 'newton_cg':
        # TODO: implement Newton-CG
        raise NotImplementedError(('Newton Conjugate Gradient optimizer '
                                   'is not currently supported'))
    return [Mn, Sn, B, t_hat]
Developer: OncoImmunity, Project: sklearn-bayes, Lines: 21, Source: fast_rvm.py
Example 15: fit
def fit(self, X, W):
    """Learn the SDML model.

    Parameters
    ----------
    X : array-like, shape (n, d)
        data matrix, where each row corresponds to a single instance
    W : array-like, shape (n, n)
        connectivity graph, with +1 for positive pairs and -1 for negative

    Returns
    -------
    self : object
        Returns the instance.
    """
    loss_matrix = self._prepare_inputs(X, W)
    P = self.M_ + self.balance_param * loss_matrix
    emp_cov = pinvh(P)
    # hack: ensure positive semidefinite
    emp_cov = emp_cov.T.dot(emp_cov)
    _, self.M_ = graph_lasso(emp_cov, self.sparsity_param, verbose=self.verbose)
    return self
Developer: svecon, Project: metric-learn, Lines: 22, Source: sdml.py
Example 16: to_probability_density
def to_probability_density(self, X):
    """Compute probability density.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data.

    Returns
    -------
    p : array, shape (n_samples,)
        Probability densities of data.
    """
    X = np.atleast_2d(X)
    n_samples, n_features = X.shape

    precision = pinvh(self.covariance)
    d = X - self.mean
    normalization = 1 / np.sqrt((2 * np.pi) ** n_features *
                                np.linalg.det(self.covariance))
    p = np.ndarray(n_samples)
    for n in range(n_samples):
        p[n] = normalization * np.exp(-0.5 * d[n].dot(precision).dot(d[n]))
    return p
Developer: HRZaheri, Project: gmr, Lines: 23, Source: mvn.py
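The loop above evaluates the usual multivariate normal density one sample at a time; a hedged check against scipy.stats.multivariate_normal with made-up parameters:

import numpy as np
from scipy.linalg import pinvh
from scipy.stats import multivariate_normal

rng = np.random.RandomState(0)
mean = np.zeros(2)
covariance = np.array([[2.0, 0.3],
                       [0.3, 1.0]])
X = rng.randn(4, 2)

precision = pinvh(covariance)
d = X - mean
normalization = 1 / np.sqrt((2 * np.pi) ** 2 * np.linalg.det(covariance))
p = np.array([normalization * np.exp(-0.5 * di.dot(precision).dot(di)) for di in d])

print(np.allclose(p, multivariate_normal(mean, covariance).pdf(X)))  # True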
Example 17: conditional_distribution
def conditional_distribution(self, x, indices=np.array([0])):
    """ Conditional gaussian distribution
    See
    https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Conditional_distributions

    Return
    ------
    conditional : GMM
        Conditional GMM distribution p(Y | X=x)
    """
    n_features = self.means_.shape[1] - len(indices)
    expected_means = np.empty((self.n_components, n_features))
    expected_covars = np.empty((self.n_components, n_features, n_features))
    expected_weights = np.empty(self.n_components)

    # heavily inspired by https://github.com/AlexanderFabisch/gmr
    # compute expected_means, expected_covars, given input X
    for i, (mean, covar, weight) in enumerate(zip(self.means_, self.covars_, self.weights_)):
        i1, i2 = invert_indices(mean.shape[0], indices), indices
        cov_12 = covar[np.ix_(i1, i2)]
        cov_11 = covar[np.ix_(i1, i1)]
        cov_22 = covar[np.ix_(i2, i2)]
        prec_22 = pinvh(cov_22)
        regression_coeffs = cov_12.dot(prec_22)
        if x.ndim == 1:
            x = x[:, np.newaxis]
        expected_means[i] = mean[i1] + regression_coeffs.dot((x - mean[i2]).T).T
        expected_covars[i] = cov_11 - regression_coeffs.dot(cov_12.T)
        expected_weights[i] = weight * \
            multivariate_normal.pdf(x, mean=mean[indices], cov=covar[np.ix_(indices, indices)])
    expected_weights /= expected_weights.sum()
    return expected_means, expected_covars, expected_weights
Developer: show0k, Project: gmm-lbd, Lines: 37, Source: gmm.py
Example 18: test_pinvh_simple_complex
def test_pinvh_simple_complex():
    a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
         + 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
    a = np.dot(a, a.conj().T)
    a_pinv = pinvh(a)
    assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
Developer: 0664j35t3r, Project: scikit-learn, Lines: 6, Source: test_utils.py
Example 19: test_pinvh_simple_real
def test_pinvh_simple_real():
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
    a = np.dot(a, a.T)
    a_pinv = pinvh(a)
    assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
Developer: 0664j35t3r, Project: scikit-learn, Lines: 5, Source: test_utils.py
Example 20: set_optimal_shrinkage_amount
def set_optimal_shrinkage_amount(self, X, method="cv", verbose=False):
    """Set optimal shrinkage amount according to chosen method.

    /!\ Could be rewritten with GridSearchCV.

    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    method: float or str in {"cv", "lw", "oas"}
        The method used to set the shrinkage. If a floating-point value
        is provided, that value is used. Otherwise, the selection is
        made according to the chosen method:
        "cv" (default): 10-fold cross-validation
            (or leave-one-out cross-validation if n_samples < 10)
        "lw": Ledoit-Wolf criterion
        "oas": OAS criterion
    verbose: bool,
        Verbose mode or not.

    Returns
    -------
    optimal_shrinkage: float,
        The optimal amount of shrinkage.
    """
    n_samples, n_features = X.shape
    if isinstance(method, str):
        std_shrinkage = np.trace(empirical_covariance(X)) / \
            (n_features * n_samples)
        self.std_shrinkage = std_shrinkage
    if method == "cv":
        from sklearn.covariance import log_likelihood
        n_samples, n_features = X.shape
        shrinkage_range = np.concatenate((
            [0.], 10. ** np.arange(-n_samples / n_features, -1, 0.5),
            np.arange(0.05, 1., 0.05),
            np.arange(1., 20., 1.), np.arange(20., 100, 5.),
            10. ** np.arange(2, 7, 0.5)))
        # get a "pure" active set with a standard shrinkage
        active_set_estimator = RMCDl2(shrinkage=std_shrinkage)
        active_set_estimator.fit(X)
        active_set = np.where(active_set_estimator.support_)[0]
        # split this active set in ten parts
        active_set = active_set[np.random.permutation(active_set.size)]
        if active_set.size >= 10:
            # ten-fold cross-validation
            n_folds = 10
            fold_size = active_set.size // 10  # integer division: used as an index below
        else:
            n_folds = active_set.size
            fold_size = 1
        log_likelihoods = np.zeros((shrinkage_range.size, n_folds))
        if verbose:
            print("*** Cross-validation")
        for trial in range(n_folds):
            if verbose:
                print(trial / float(n_folds))
            # define train and test sets
            train_set_indices = np.concatenate(
                (np.arange(0, fold_size * trial),
                 np.arange(fold_size * (trial + 1), n_folds * fold_size)))
            train_set = X[active_set[train_set_indices]]
            test_set = X[active_set[np.arange(
                fold_size * trial, fold_size * (trial + 1))]]
            # learn location and covariance estimates from train set
            # for several amounts of shrinkage
            for i, shrinkage in enumerate(shrinkage_range):
                location = test_set.mean(0)
                cov = empirical_covariance(train_set)
                cov.flat[::(n_features + 1)] += shrinkage * std_shrinkage
                # compute test data likelihood
                log_likelihoods[i, trial] = log_likelihood(
                    empirical_covariance(test_set - location,
                                         assume_centered=True), pinvh(cov))
        optimal_shrinkage = shrinkage_range[
            np.argmax(log_likelihoods.mean(1))]
        self.shrinkage = optimal_shrinkage * std_shrinkage
        self.shrinkage_cst = optimal_shrinkage
        if verbose:
            print("optimal shrinkage: %g (%g x lambda(= %g))"
                  % (self.shrinkage, optimal_shrinkage, std_shrinkage))
        self.log_likelihoods = log_likelihoods
        self.shrinkage_range = shrinkage_range
        return shrinkage_range, log_likelihoods
    elif method == "oas":
        from sklearn.covariance import OAS
        # the original called self.__init__(...), which returns None;
        # constructing a fresh estimator is presumably what was intended
        rmcd = RMCDl2(shrinkage=std_shrinkage)
        support = rmcd.fit(X).support_
        oas = OAS().fit(X[support])
        if oas.shrinkage_ == 1:
            self.shrinkage_cst = np.inf
        else:
            self.shrinkage_cst = oas.shrinkage_ / (1. - oas.shrinkage_)
        self.shrinkage = self.shrinkage_cst * std_shrinkage * n_features
    elif method == "lw":
        from sklearn.covariance import LedoitWolf
        rmcd = RMCDl2(self, h=self.h, shrinkage=std_shrinkage)
        # ... (the rest of this method is omitted) ...
Developer: VirgileFritsch, Project: outliers, Lines: 101, Source: rmcd.py
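For the "lw" and "oas" branches above, scikit-learn ships estimators that select the shrinkage coefficient directly; a minimal hedged sketch on made-up data:

import numpy as np
from sklearn.covariance import LedoitWolf, OAS

rng = np.random.RandomState(0)
X = rng.randn(100, 5)

lw = LedoitWolf().fit(X)   # Ledoit-Wolf criterion
oas = OAS().fit(X)         # Oracle Approximating Shrinkage criterion
print(lw.shrinkage_, oas.shrinkage_)  # chosen coefficients, both in [0, 1]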
Note: the sklearn.utils.extmath.pinvh examples in this article were compiled by vimsky (纯净天空) from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.