This article collects typical usage examples of the Python class sklearn.covariance.EmpiricalCovariance. If you are wondering what exactly EmpiricalCovariance does and how to use it, the curated class examples below may help.
The following 20 code examples of the EmpiricalCovariance class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
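Before turning to the collected examples, here is a minimal, self-contained sketch of typical EmpiricalCovariance usage. It is purely illustrative and not drawn from any of the projects below; the random data and variable names are made up.

import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.randn(100, 3)               # 100 samples, 3 features

cov = EmpiricalCovariance().fit(X)  # maximum-likelihood covariance estimate
print(cov.covariance_.shape)        # (3, 3)
print(cov.location_)                # estimated per-feature mean
dists = cov.mahalanobis(X)          # squared Mahalanobis distance of each sample
print(dists.shape)                  # (100,)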
Example 1: test_suffstat_sk_tied
def test_suffstat_sk_tied():
    # use equation Nk * Sk / N = S_tied
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 500, 2, 2

    resp = rng.rand(n_samples, n_components)
    resp = resp / resp.sum(axis=1)[:, np.newaxis]
    X = rng.rand(n_samples, n_features)
    nk = resp.sum(axis=0)
    xk = np.dot(resp.T, X) / nk[:, np.newaxis]

    covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
                              0) / n_samples

    covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)

    ecov = EmpiricalCovariance()
    ecov.covariance_ = covars_pred_full
    assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)

    # check the precision computation
    precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
    precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
    precs_est = linalg.inv(covars_pred_tied)
    assert_array_almost_equal(precs_est, precs_pred)
Author: jerry-dumblauskas | Project: scikit-learn | Lines: 27 | Source: test_gaussian_mixture.py
Example 2: test_suffstat_sk_full
def test_suffstat_sk_full():
    # compare the EmpiricalCovariance.covariance fitted on X*sqrt(resp)
    # with _sufficient_sk_full, n_components=1
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 2

    # special case 1, assuming data is "centered"
    X = rng.rand(n_samples, n_features)
    resp = rng.rand(n_samples, 1)
    X_resp = np.sqrt(resp) * X
    nk = np.array([n_samples])
    xk = np.zeros((1, n_features))
    covars_pred = _estimate_gaussian_covariance_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=True)
    ecov.fit(X_resp)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)

    # special case 2, assuming resp are all ones
    resp = np.ones((n_samples, 1))
    nk = np.array([n_samples])
    xk = X.mean().reshape((1, -1))
    covars_pred = _estimate_gaussian_covariance_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=False)
    ecov.fit(X)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
Author: 123fengye741 | Project: scikit-learn | Lines: 27 | Source: test_gaussian_mixture.py
Example 3: __init__
def __init__(self, store_precision=True, assume_centered=False, h=None,
             correction=None):
    EmpiricalCovariance.__init__(
        self, store_precision=store_precision,
        assume_centered=assume_centered)
    self.h = h
    self.correction = correction
Author: VirgileFritsch | Project: outliers | Lines: 7 | Source: mcd.py
Example 4: test_suffstat_sk_diag
def test_suffstat_sk_diag():
    # test against 'full' case
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 500, 2, 2

    resp = rng.rand(n_samples, n_components)
    resp = resp / resp.sum(axis=1)[:, np.newaxis]
    X = rng.rand(n_samples, n_features)
    nk = resp.sum(axis=0)
    xk = np.dot(resp.T, X) / nk[:, np.newaxis]
    precs_pred_full = _estimate_gaussian_precisions_cholesky_full(resp, X,
                                                                  nk, xk, 0)
    covars_pred_full = [linalg.inv(np.dot(precision_chol, precision_chol.T))
                        for precision_chol in precs_pred_full]

    precs_pred_diag = _estimate_gaussian_precisions_cholesky_diag(resp, X,
                                                                  nk, xk, 0)
    covars_pred_diag = np.array([np.diag(1. / d) ** 2
                                 for d in precs_pred_diag])

    ecov = EmpiricalCovariance()
    for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
        ecov.covariance_ = np.diag(np.diag(cov_full))
        assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
        assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
Author: 0664j35t3r | Project: scikit-learn | Lines: 25 | Source: test_gaussian_mixture.py
Example 5: CovEmbedding
class CovEmbedding(BaseEstimator, TransformerMixin):
    """Transformer that returns the coefficients on a flat space to
    perform the analysis.
    """

    def __init__(self, base_estimator=None, kind='tangent'):
        self.base_estimator = base_estimator
        self.kind = kind
        # if self.base_estimator == None:
        #     self.base_estimator_ = ...
        # else:
        #     self.base_estimator_ = clone(base_estimator)

    def fit(self, X, y=None):
        if self.base_estimator is None:
            self.base_estimator_ = EmpiricalCovariance(
                assume_centered=True)
        else:
            self.base_estimator_ = clone(self.base_estimator)

        if self.kind == 'tangent':
            # self.mean_cov = mean_cov = spd_manifold.log_mean(covs)
            # Euclidean mean as an approximation to the geodesic
            covs = [self.base_estimator_.fit(x).covariance_ for x in X]
            covs = my_stack(covs)
            mean_cov = np.mean(covs, axis=0)
            self.whitening_ = inv_sqrtm(mean_cov)
        return self

    def transform(self, X):
        """Apply transform to covariances

        Parameters
        ----------
        covs: list of array
            list of covariance matrices, shape (n_rois, n_rois)

        Returns
        -------
        list of array, transformed covariance matrices,
        shape (n_rois * (n_rois+1)/2,)
        """
        covs = [self.base_estimator_.fit(x).covariance_ for x in X]
        covs = my_stack(covs)
        p = covs.shape[-1]
        if self.kind == 'tangent':
            id_ = np.identity(p)
            covs = [self.whitening_.dot(c.dot(self.whitening_)) - id_
                    for c in covs]
        elif self.kind == 'partial correlation':
            covs = [cov_to_corr(inv(g)) for g in covs]
        elif self.kind == 'correlation':
            covs = [cov_to_corr(g) for g in covs]
        return np.array([sym_to_vec(c) for c in covs])
Author: rphlypo | Project: parietalretreat | Lines: 54 | Source: covariance.py
Example 6: Mahalanobis
class Mahalanobis(BaseEstimator):
    """Mahalanobis distance estimator. Uses a covariance estimate
    to compute the Mahalanobis distance of the observations
    from the model.

    Parameters
    ----------
    robust : boolean to determine whether to use a robust estimator
        based on Minimum Covariance Determinant computation
    """

    def __init__(self, robust=False):
        if not robust:
            from sklearn.covariance import EmpiricalCovariance as CovarianceEstimator
        else:
            from sklearn.covariance import MinCovDet as CovarianceEstimator
        self.model = CovarianceEstimator()
        self.cov = None

    def fit(self, X, y=None, **params):
        """Fits the covariance model according to the given training
        data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns self.
        """
        self.cov = self.model.fit(X)
        return self

    def score(self, X, y=None):
        """Computes the Mahalanobis distances of given observations.

        The provided observations are assumed to be centered. One may want to
        center them using a location estimate first.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The observations whose Mahalanobis distances we compute.

        Returns
        -------
        mahalanobis_distance : array, shape = [n_observations,]
            Mahalanobis distances of the observations.
        """
        # return self.model.score(X, assume_centered=True)
        return - self.model.mahalanobis(X - self.model.location_) ** 0.33
Author: pborky | Project: pynfsa | Lines: 53 | Source: models.py
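As a follow-up to Example 6, a hypothetical usage sketch for the Mahalanobis estimator defined above (not part of the pynfsa source; the random training and test arrays are made up for illustration):

import numpy as np

rng = np.random.RandomState(0)
X_train = rng.randn(200, 3)
X_test = rng.randn(20, 3)

est = Mahalanobis(robust=False).fit(X_train)  # EmpiricalCovariance under the hood
scores = est.score(X_test)                    # negative, rescaled Mahalanobis distances
print(scores.shape)                           # (20,)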
Example 7: __init__
def __init__(self, store_precision=True, assume_centered=False,
             h=None, contamination=0.1, pvalue_correction="fwer",
             no_fit=False):
    """
    """
    EmpiricalCovariance.__init__(
        self, store_precision=store_precision,
        assume_centered=assume_centered)
    CovarianceOutlierDetectionMixin.__init__(
        self, contamination=contamination,
        pvalue_correction=pvalue_correction)
    self.no_fit = no_fit
Author: VirgileFritsch | Project: outliers | Lines: 12 | Source: elliptic_envelope.py
Example 8: printSciKitCovarianceMatrixs
def printSciKitCovarianceMatrixs():
    # does not work, ValueError: setting an array element with a sequence.
    xMaker = RSTCovarianceMatrixMaker()
    nums, data, ilabels = getLabeledRSTData(False)
    for i, d in enumerate(data):
        d['ratio'] = ilabels[i]
    xMaker.setInstanceNums(nums)
    xMaker.fit(data)
    X = xMaker.transform(data)
    correlator = EmpiricalCovariance()
    correlator.fit(X)
    print(correlator.covariance_)
Author: ybur-yug | Project: emailinsight | Lines: 13 | Source: reviewDatasetInspection.py
Example 9: CovEmbedding
class CovEmbedding(BaseEstimator, TransformerMixin):
    """Transformer that returns the coefficients on a flat space to
    perform the analysis.
    """

    def __init__(self, cov_estimator=None, kind='tangent'):
        self.cov_estimator = cov_estimator
        self.kind = kind

    def fit(self, X, y=None):
        if self.cov_estimator is None:
            self.cov_estimator_ = EmpiricalCovariance(
                assume_centered=True)
        else:
            self.cov_estimator_ = clone(self.cov_estimator)

        if self.kind == 'tangent':
            covs = [self.cov_estimator_.fit(x).covariance_ for x in X]
            self.mean_cov_ = spd_mfd.frechet_mean(covs, max_iter=30, tol=1e-7)
            self.whitening_ = spd_mfd.inv_sqrtm(self.mean_cov_)
        return self

    def transform(self, X):
        """Apply transform to covariances

        Parameters
        ----------
        covs: list of array
            list of covariance matrices, shape (n_rois, n_rois)

        Returns
        -------
        list of array, transformed covariance matrices,
        shape (n_rois * (n_rois+1)/2,)
        """
        covs = [self.cov_estimator_.fit(x).covariance_ for x in X]
        covs = spd_mfd.my_stack(covs)
        if self.kind == 'tangent':
            covs = [spd_mfd.logm(self.whitening_.dot(c).dot(self.whitening_))
                    for c in covs]
        elif self.kind == 'precision':
            covs = [spd_mfd.inv(g) for g in covs]
        elif self.kind == 'partial correlation':
            covs = [prec_to_partial(spd_mfd.inv(g)) for g in covs]
        elif self.kind == 'correlation':
            covs = [cov_to_corr(g) for g in covs]
        else:
            raise ValueError("Unknown connectivity measure.")
        return np.array([sym_to_vec(c) for c in covs])
Author: rphlypo | Project: parietalretreat | Lines: 50 | Source: connectivity.py
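For orientation, a hypothetical call pattern for this CovEmbedding transformer, assuming the repository helpers it relies on (spd_mfd, prec_to_partial, cov_to_corr, sym_to_vec) are importable; the per-subject time-series arrays are synthetic:

import numpy as np

rng = np.random.RandomState(0)
# three "subjects", each with 200 time points over 10 regions of interest
subjects = [rng.randn(200, 10) for _ in range(3)]

embed = CovEmbedding(cov_estimator=None, kind='partial correlation')
vecs = embed.fit(subjects).transform(subjects)
print(vecs.shape)  # expected (3, 55): 10 * 11 / 2 coefficients per subject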
Example 10: mahalanobis_plot
def mahalanobis_plot(ctry=None, df=None, weighted=True, inliers=False):
    """
    See http://scikit-learn.org/0.13/modules/outlier_detection.html#\
    fitting-an-elliptic-envelop
    for details.
    """
    if df is None and ctry is None:
        raise ValueError('Either the country or a dataframe must be supplied')
    elif df is None:
        df = load_res(ctry, weighted=weighted)
    if inliers:
        df = get_inliers(df=df)
    X = df.values
    robust_cov = MinCovDet().fit(X)
    #-----------------------------------------------------------------------------
    # compare estimators learnt from the full data set with true parameters
    emp_cov = EmpiricalCovariance().fit(X)
    #-----------------------------------------------------------------------------
    # Display results
    fig = plt.figure()
    fig.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
    #-----------------------------------------------------------------------------
    # Show data set
    ax1 = fig.add_subplot(1, 1, 1)
    ax1.scatter(X[:, 0], X[:, 1], alpha=.5, color='k', marker='.')
    ax1.set_title(country_code[ctry])
    #-----------------------------------------------------------------------------
    # Show contours of the distance functions
    xx, yy = np.meshgrid(np.linspace(ax1.get_xlim()[0], ax1.get_xlim()[1],
                                     100),
                         np.linspace(ax1.get_ylim()[0], ax1.get_ylim()[1],
                                     100))
    zz = np.c_[xx.ravel(), yy.ravel()]
    #-----------------------------------------------------------------------------
    mahal_emp_cov = emp_cov.mahalanobis(zz)
    mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
    emp_cov_contour = ax1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')
    #-----------------------------------------------------------------------------
    mahal_robust_cov = robust_cov.mahalanobis(zz)
    mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
    robust_contour = ax1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')
    ax1.legend([emp_cov_contour.collections[1], robust_contour.collections[1]],
               ['MLE dist', 'robust dist'],
               loc="upper right", borderaxespad=0)
    ax1.grid()
    return (fig, ax1, ctry)
Author: RaoUmer | Project: data-wrangling | Lines: 50 | Source: outliers_after_weighting.py
Example 11: __init__
def __init__(self, robust=False):
    if not robust:
        from sklearn.covariance import EmpiricalCovariance as CovarianceEstimator
    else:
        from sklearn.covariance import MinCovDet as CovarianceEstimator
    self.model = CovarianceEstimator()
    self.cov = None
Author: pborky | Project: pynfsa | Lines: 7 | Source: models.py
Example 12: fit
def fit(self, data):
    nu = 0.01
    n_sample = data.shape[0]
    n_feature = data.shape[1]

    exclude = set()
    for d in range(n_feature):
        feature = data[:, d]
        s_feature = feature.copy()
        s_feature.sort()
        low = s_feature[int(n_sample * nu / 2)]
        upp = s_feature[n_sample - int(n_sample * nu / 2)]
        exld = numpy.nonzero(numpy.logical_or((feature > upp), (feature < low)))[0]
        [exclude.add(e) for e in exld]

    use = numpy.array([f for f in range(n_sample) if f not in exclude])
    data_ = data[use, :]

    self.cov = EmpiricalCovariance().fit(data_)
    dist = self.cov.mahalanobis(data)

    self.cutoff = numpy.percentile(dist, self.perc_keep)
    print(self.cutoff)
Author: CellH5 | Project: cellh5apps | Lines: 26 | Source: learner.py
Example 13: test_gaussian_mixture_fit
def test_gaussian_mixture_fit():
    # recover the ground truth
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng)
    n_features = rand_data.n_features
    n_components = rand_data.n_components

    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        g = GaussianMixture(n_components=n_components, n_init=20,
                            reg_covar=0, random_state=rng,
                            covariance_type=covar_type)
        g.fit(X)

        # needs more data to pass the test with rtol=1e-7
        assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
                        rtol=0.1, atol=1e-2)

        arg_idx1 = g.means_[:, 0].argsort()
        arg_idx2 = rand_data.means[:, 0].argsort()
        assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
                        rtol=0.1, atol=1e-2)

        if covar_type == 'full':
            prec_pred = g.precisions_
            prec_test = rand_data.precisions['full']
        elif covar_type == 'tied':
            prec_pred = np.array([g.precisions_] * n_components)
            prec_test = np.array([rand_data.precisions['tied']] * n_components)
        elif covar_type == 'spherical':
            prec_pred = np.array([np.eye(n_features) * c
                                  for c in g.precisions_])
            prec_test = np.array([np.eye(n_features) * c for c in
                                  rand_data.precisions['spherical']])
        elif covar_type == 'diag':
            prec_pred = np.array([np.diag(d) for d in g.precisions_])
            prec_test = np.array([np.diag(d) for d in
                                  rand_data.precisions['diag']])

        arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
        arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
        for k, h in zip(arg_idx1, arg_idx2):
            ecov = EmpiricalCovariance()
            ecov.covariance_ = prec_test[h]
            # the accuracy depends on the number of data and randomness, rng
            assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.1)
Author: jerry-dumblauskas | Project: scikit-learn | Lines: 46 | Source: test_gaussian_mixture.py
Example 14: OneClassMahalanobis
class OneClassMahalanobis(BaseClassifier):
    _fit_params = ['perc_keep']
    _predict_params = []

    def __init__(self, *args, **kwargs):
        # BaseClassifier.__init__(self, *args, **kwargs)
        self.perc_keep = kwargs["perc_keep"]

    def fit(self, data):
        nu = 0.01
        n_sample = data.shape[0]
        n_feature = data.shape[1]

        exclude = set()
        for d in range(n_feature):
            feature = data[:, d]
            s_feature = feature.copy()
            s_feature.sort()
            low = s_feature[int(n_sample * nu / 2)]
            upp = s_feature[n_sample - int(n_sample * nu / 2)]
            exld = numpy.nonzero(numpy.logical_or((feature > upp), (feature < low)))[0]
            [exclude.add(e) for e in exld]

        use = numpy.array([f for f in range(n_sample) if f not in exclude])
        data_ = data[use, :]

        self.cov = EmpiricalCovariance().fit(data_)
        dist = self.cov.mahalanobis(data)

        self.cutoff = numpy.percentile(dist, self.perc_keep)
        print(self.cutoff)

    def predict(self, data):
        mahal_dist = self.cov.mahalanobis(data)
        self.mahal_dist = mahal_dist
        print(mahal_dist.min(), mahal_dist.max(), self.cutoff,
              (mahal_dist > self.cutoff).sum(), "of", len(mahal_dist))
        return (mahal_dist > self.cutoff).astype(numpy.uint8) * -2 + 1

    def decision_function(self, data=None):
        return self.mahal_dist
Author: CellH5 | Project: cellh5apps | Lines: 45 | Source: learner.py
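A hypothetical usage sketch for the OneClassMahalanobis detector above, assuming BaseClassifier and numpy are importable as in the cellh5apps source; the random data are made up:

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(500, 4)

detector = OneClassMahalanobis(perc_keep=95)  # keep 95% of training distances below the cutoff
detector.fit(X)
labels = detector.predict(X)                  # +1 for inliers, -1 beyond the cutoff
print((labels == -1).sum())                   # roughly 5% of the samples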
Example 15: test_suffstat_sk_tied
def test_suffstat_sk_tied():
    # use equation Nk * Sk / N = S_tied
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 500, 2, 2

    resp = rng.rand(n_samples, n_components)
    resp = resp / resp.sum(axis=1)[:, np.newaxis]
    X = rng.rand(n_samples, n_features)
    nk = resp.sum(axis=0)
    xk = np.dot(resp.T, X) / nk[:, np.newaxis]

    covars_pred_full = _estimate_gaussian_covariance_full(resp, X, nk, xk, 0)
    covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
                              0) / n_samples

    covars_pred_tied = _estimate_gaussian_covariance_tied(resp, X, nk, xk, 0)

    ecov = EmpiricalCovariance()
    ecov.covariance_ = covars_pred_full
    assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
Author: 123fengye741 | Project: scikit-learn | Lines: 19 | Source: test_gaussian_mixture.py
Example 16: launch_mcd_on_dataset
def launch_mcd_on_dataset(n_samples, n_features, n_outliers,
                          tol_loc, tol_cov, tol_support, correction):
    """
    """
    data = np.random.randn(n_samples, n_features)
    # add some outliers
    outliers_index = np.random.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    # compute MCD directly
    T, S, H = fast_mcd(data, correction=correction)
    # compare with the estimates learnt from the inliers
    pure_data = data[inliers_mask]
    error_location = np.sum((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    emp_cov = EmpiricalCovariance().fit(pure_data)
    # print emp_cov.error_norm(S)
    assert(emp_cov.error_norm(S) < tol_cov)
    assert(np.sum(H) > tol_support)
    # check improvement
    if (n_outliers / float(n_samples) > 0.1) and (n_features > 1):
        error_bad_location = np.sum((data.mean(0) - T) ** 2)
        assert(error_bad_location > error_location)
        bad_emp_cov = EmpiricalCovariance().fit(data)
        assert(emp_cov.error_norm(S) < bad_emp_cov.error_norm(S))

    # compute MCD by fitting an object
    mcd_fit = MCD().fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.sum((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    assert(emp_cov.error_norm(S) < tol_cov)
    assert(np.sum(H) > tol_support)
    # check improvement
    if (n_outliers / float(n_samples) > 0.1) and (n_features > 1):
        error_bad_location = np.sum((data.mean(0) - T) ** 2)
        assert(error_bad_location > error_location)
        bad_emp_cov = EmpiricalCovariance().fit(data)
        assert(emp_cov.error_norm(S) < bad_emp_cov.error_norm(S))
Author: bvtrach | Project: scikit-learn | Lines: 47 | Source: test_covariance.py
Example 17: fit
def fit(self, X, y=None):
    if self.cov_estimator is None:
        self.cov_estimator_ = EmpiricalCovariance(
            assume_centered=True)
    else:
        self.cov_estimator_ = clone(self.cov_estimator)

    if self.kind == 'tangent':
        covs = [self.cov_estimator_.fit(x).covariance_ for x in X]
        self.mean_cov_ = spd_mfd.frechet_mean(covs, max_iter=30, tol=1e-7)
        self.whitening_ = spd_mfd.inv_sqrtm(self.mean_cov_)
    return self
Author: rphlypo | Project: parietalretreat | Lines: 12 | Source: connectivity.py
Example 18: test_suffstat_sk_full
def test_suffstat_sk_full():
    # compare the precision matrix computed from the
    # EmpiricalCovariance.covariance fitted on X*sqrt(resp)
    # with _sufficient_sk_full, n_components=1
    rng = np.random.RandomState(0)
    n_samples, n_features = 500, 2

    # special case 1, assuming data is "centered"
    X = rng.rand(n_samples, n_features)
    resp = rng.rand(n_samples, 1)
    X_resp = np.sqrt(resp) * X
    nk = np.array([n_samples])
    xk = np.zeros((1, n_features))
    covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=True)
    ecov.fit(X_resp)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)

    # check the precision computation
    precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
    precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
    precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
    assert_array_almost_equal(precs_est, precs_pred)

    # special case 2, assuming resp are all ones
    resp = np.ones((n_samples, 1))
    nk = np.array([n_samples])
    xk = X.mean(axis=0).reshape((1, -1))
    covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    ecov = EmpiricalCovariance(assume_centered=False)
    ecov.fit(X)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)

    # check the precision computation
    precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
    precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
    precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
    assert_array_almost_equal(precs_est, precs_pred)
Author: jerry-dumblauskas | Project: scikit-learn | Lines: 40 | Source: test_gaussian_mixture.py
Example 19: test_suffstat_sk_diag
def test_suffstat_sk_diag():
    # test against 'full' case
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 500, 2, 2

    resp = rng.rand(n_samples, n_components)
    resp = resp / resp.sum(axis=1)[:, np.newaxis]
    X = rng.rand(n_samples, n_features)
    nk = resp.sum(axis=0)
    xk = np.dot(resp.T, X) / nk[:, np.newaxis]
    covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)

    ecov = EmpiricalCovariance()
    for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
        ecov.covariance_ = np.diag(np.diag(cov_full))
        cov_diag = np.diag(cov_diag)
        assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
        assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)

    # check the precision computation
    precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')
    assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)
Author: jerry-dumblauskas | Project: scikit-learn | Lines: 23 | Source: test_gaussian_mixture.py
Example 20: detect_bad_channels
def detect_bad_channels(inst, pick_types=None, threshold=.2):
    from sklearn.preprocessing import RobustScaler
    from sklearn.covariance import EmpiricalCovariance
    from jr.stats import median_abs_deviation

    if pick_types is None:
        pick_types = dict(meg='mag')
    inst = inst.pick_types(copy=True, **pick_types)
    cov = EmpiricalCovariance()
    cov.fit(inst._data.T)
    cov = cov.covariance_

    # center
    scaler = RobustScaler()
    cov = scaler.fit_transform(cov).T
    cov /= median_abs_deviation(cov)
    cov -= np.median(cov)

    # compute robust summary metrics
    mu = np.median(cov, axis=0)
    sigma = median_abs_deviation(cov, axis=0)
    mu /= median_abs_deviation(mu)
    sigma /= median_abs_deviation(sigma)
    distance = np.sqrt(mu ** 2 + sigma ** 2)

    bad = np.where(distance < threshold)[0]
    bad = [inst.ch_names[ch] for ch in bad]
    return bad
Author: LauraGwilliams | Project: jr-tools | Lines: 24 | Source: base.py
Note: the sklearn.covariance.EmpiricalCovariance class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not repost without permission.