This article collects typical usage examples of the Python function sklearn.covariance.empirical_covariance. If you have been wondering what empirical_covariance does, how to call it, or where to find concrete examples, the hand-picked snippets below should help.
Twenty code examples of the empirical_covariance function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
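Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what the function computes: the maximum-likelihood covariance estimate of a data matrix of shape (n_samples, n_features), which divides by n rather than n - 1.

import numpy as np
from sklearn.covariance import empirical_covariance

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 8.0]])
cov = empirical_covariance(X)  # shape (2, 2)
# agrees with the biased (divide-by-n) numpy estimate
print(np.allclose(cov, np.cov(X, rowvar=False, bias=True)))  # True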
Example 1: test_covariance
def test_covariance():
    """Tests Covariance module on a simple dataset."""
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X), squared=False), 0)
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert(np.amax(mahal_dist) < 250)
    assert(np.amin(mahal_dist) > 50)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
Author: forkloop | Project: scikit-learn | Lines: 35 | Source: test_covariance.py
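A quick arithmetic check on the hard-coded expectation at the end of this test: for X_integer = [[0, 1], [1, 0]] the column means are [0.5, 0.5], so every deviation is ±0.5; the diagonal entries are mean(0.5²) = 0.25 and the off-diagonal entries are mean(0.5 · (−0.5)) = −0.25, which is exactly the result matrix.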
Example 2: test_shrunk_covariance
def test_shrunk_covariance():
    """Tests ShrunkCovariance module on a simple dataset."""
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)
    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
Author: forkloop | Project: scikit-learn | Lines: 32 | Source: test_covariance.py
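For readers checking the first assertion by hand, shrunk_covariance blends the empirical estimate with a scaled identity. A minimal sketch of the documented formula, on made-up data:

import numpy as np
from sklearn.covariance import empirical_covariance, shrunk_covariance

X = np.random.RandomState(0).randn(50, 3)
emp = empirical_covariance(X)
alpha = 0.5  # shrinkage coefficient
target = (np.trace(emp) / emp.shape[0]) * np.eye(emp.shape[0])
manual = (1 - alpha) * emp + alpha * target
print(np.allclose(manual, shrunk_covariance(emp, shrinkage=alpha)))  # True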
Example 3: _nonrobust_covariance
def _nonrobust_covariance(self, data, assume_centered=False):
    """Non-robust estimation of the covariance to be used within MCD.

    Parameters
    ----------
    data : array_like, shape (n_samples, n_features)
        Data for which to compute the non-robust covariance matrix.
    assume_centered : bool
        Whether or not the observations should be considered as centered.

    Returns
    -------
    nonrobust_covariance : array_like, shape (n_features, n_features)
        The non-robust covariance of the data.
    """
    try:
        cov, prec = graph_lasso(
            empirical_covariance(data, assume_centered=assume_centered),
            self.shrinkage)
    except Exception:
        print(" > Exception!")
        # regularize a (near-)singular covariance by adding a small
        # ridge to its diagonal, then retry
        emp_cov = empirical_covariance(
            data, assume_centered=assume_centered)
        emp_cov.flat[::data.shape[1] + 1] += 1e-06
        cov, prec = graph_lasso(emp_cov, self.shrinkage)
    return cov
Author: VirgileFritsch | Project: outliers | Lines: 27 | Source: rmcd.py
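The fallback branch above relies on a strided-view trick: for an (n, n) array, emp_cov.flat[::n + 1] addresses exactly the diagonal, so adding 1e-06 there regularizes a (near-)singular covariance before retrying graph_lasso. A minimal sketch of the effect, with a made-up singular matrix:

import numpy as np

emp_cov = np.array([[1.0, 1.0], [1.0, 1.0]])  # rank 1, hence singular
n_features = emp_cov.shape[1]
emp_cov.flat[::n_features + 1] += 1e-06  # touches only the diagonal
print(np.linalg.matrix_rank(emp_cov))  # 2: full rank after the jitter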
Example 4: test_covariance
def test_covariance():
    """Tests Covariance module on a simple dataset."""
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert(np.amin(mahal_dist) > 0)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test with one sample
    # FIXME I don't know what this test does
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
Author: HapeMask | Project: scikit-learn | Lines: 51 | Source: test_covariance.py
Example 5: test_graphical_lasso_iris_singular
def test_graphical_lasso_iris_singular():
    # Small subset of rows to test the rank-deficient case
    # Need to choose samples such that none of the variances are zero
    indices = np.arange(10, 13)
    # Hard-coded solution from R glasso package for alpha=0.01
    cov_R = np.array([
        [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
        [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
        [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
        [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
    ])
    icov_R = np.array([
        [24.42244057, -16.831679593, 0.0, 0.0],
        [-16.83168201, 24.351841681, -6.206896552, -12.5],
        [0.0, -6.206896171, 153.103448276, 0.0],
        [0.0, -12.499999143, 0.0, 462.5]
    ])
    X = datasets.load_iris().data[indices, :]
    emp_cov = empirical_covariance(X)
    for method in ('cd', 'lars'):
        cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False,
                                    mode=method)
        assert_array_almost_equal(cov, cov_R, decimal=5)
        assert_array_almost_equal(icov, icov_R, decimal=5)
Author: MartinThoma | Project: scikit-learn | Lines: 25 | Source: test_graphical_lasso.py
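Why this is the rank-deficient case: three samples in four dimensions leave the empirical covariance with rank at most 2 after centering, so the plain inverse does not exist and only the penalized (alpha > 0) problem is well-posed. A short check of that claim:

import numpy as np
from sklearn import datasets
from sklearn.covariance import empirical_covariance

X = datasets.load_iris().data[np.arange(10, 13), :]
print(np.linalg.matrix_rank(empirical_covariance(X)) < 4)  # True: singular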
Example 6: test_graph_lasso
def test_graph_lasso(random_state=0):
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)
    for alpha in (.01, .1):
        covs = dict()
        for method in ('cd', 'lars'):
            cov_, _, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                         return_costs=True)
            covs[method] = cov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease
            assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'])
    # Smoke test the estimator (last loop iteration also used alpha=.1)
    model = GraphLasso(alpha=.1).fit(X)
    assert_array_almost_equal(model.covariance_, covs['cd'])
Author: Jetafull | Project: scikit-learn | Lines: 25 | Source: test_graph_lasso.py
Example 7: test_oas
def test_oas():
    """Tests OAS module on a simple dataset."""
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X, assume_centered=True)
    assert_almost_equal(oa.shrinkage_, 0.018740, 4)
    assert_almost_equal(oa.score(X, assume_centered=True), -5.03605, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X, assume_centered=True)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    oa = OAS()
    oa.fit(X_1d, assume_centered=True)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X, assume_centered=True)
    assert_almost_equal(oa.score(X, assume_centered=True), -5.03605, 4)
    assert(oa.precision_ is None)
    ### Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, 0.020236, 4)
    assert_almost_equal(oa.score(X), 2.079025, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), 2.079025, 4)
    assert(oa.precision_ is None)
Author: bvtrach | Project: scikit-learn | Lines: 60 | Source: test_covariance.py
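A side note on the API used here: this snippet predates the scikit-learn change that moved assume_centered from fit() into the estimator constructor. A hedged sketch of the equivalent modern call, on stand-in data:

import numpy as np
from sklearn.covariance import OAS

X = np.random.RandomState(0).randn(30, 4)  # stand-in data, not the test fixture
oa = OAS(assume_centered=True).fit(X)      # constructor argument, not a fit() kwarg
print(oa.shrinkage_)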
Example 8: launch_mcd_on_dataset
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
Author: JeongSeonGyo | Project: EnergyData | Lines: 26 | Source: test_robust_covariance.py
Example 9: get_cov
def get_cov(data):
    dat = data.training_data_all_ways + data.testing_data_all_ways
    num_ways = len(data.get_list_of_ways())
    # map each WAY to a row/column index
    m = {}
    for i, way in enumerate(data.get_list_of_ways()):
        m[way] = i
    # build a co-occurrence count matrix
    mat = np.zeros((num_ways, num_ways))
    for elem in dat:
        ways = elem[1]
        for way in ways:
            mat[m[way], m[way]] += 1
        for w1 in ways:
            for w2 in ways:
                if w1 == w2:
                    continue
                mat[m[w1], m[w2]] += 1
    print(mat)
    emp_cov = empirical_covariance(mat)
    print(emp_cov)
    # normalize the covariance into a correlation matrix
    corr = np.zeros((num_ways, num_ways))
    for i in range(num_ways):
        for j in range(num_ways):
            corr[i, j] = emp_cov[i, j] / (math.sqrt(emp_cov[i, i]) *
                                          math.sqrt(emp_cov[j, j]))
    print(corr)
    sns.heatmap(corr, vmin=-1, vmax=1, square=True,
                xticklabels=list(m.keys()), yticklabels=list(m.keys()))
    plt.title("Covariance of WAYS frequencies")  # requires: import matplotlib.pyplot as plt
    plt.show()                                   # (the sns.plt alias was removed from seaborn)
Author: casawa | Project: multiclass-multilabel-course-labeling | Lines: 28 | Source: cov.py
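The nested loop at the end converts the covariance into a correlation matrix. A hedged, vectorized equivalent (cov_to_corr is a hypothetical helper, not part of the project above): divide by the outer product of the standard deviations.

import numpy as np

def cov_to_corr(emp_cov):
    # corr[i, j] = cov[i, j] / (std[i] * std[j])
    d = np.sqrt(np.diag(emp_cov))
    return emp_cov / np.outer(d, d)

emp = np.array([[4.0, 2.0], [2.0, 9.0]])
print(cov_to_corr(emp))  # off-diagonals become 2 / (2 * 3) = 1/3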
Example 10: objective_function
def objective_function(self, data, location, covariance):
    """Objective function minimized at each step of the MCD algorithm."""
    precision = pinvh(covariance)
    det = fast_logdet(precision)
    trace = np.trace(
        np.dot(empirical_covariance(data - location, assume_centered=True),
               precision))
    pen = self.shrinkage * np.trace(precision)
    return -det + trace + pen
Author: VirgileFritsch | Project: outliers | Lines: 10 | Source: rmcd.py
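In formula terms, this evaluates the penalized negative log-likelihood $-\log\det\Sigma^{-1} + \operatorname{tr}(S_\mu \Sigma^{-1}) + \lambda\,\operatorname{tr}(\Sigma^{-1})$, where $S_\mu$ is the empirical covariance of the data centered at location, $\Sigma^{-1}$ is the pseudo-inverse pinvh(covariance), and $\lambda$ is self.shrinkage.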
Example 11: test_empirical_covariance
def test_empirical_covariance(self):
    iris = datasets.load_iris()
    df = pdml.ModelFrame(iris)
    result = df.covariance.empirical_covariance()
    expected = covariance.empirical_covariance(iris.data)
    self.assertTrue(isinstance(result, pdml.ModelFrame))
    self.assert_index_equal(result.index, df.data.columns)
    self.assert_index_equal(result.columns, df.data.columns)
    self.assert_numpy_array_almost_equal(result.values, expected)
Author: Sandy4321 | Project: pandas-ml | Lines: 10 | Source: test_covariance.py
Example 12: covariances
def covariances():
    subject_to_means, subject_to_values = load_data(TRAINING_DATA_FILENAME, True)
    subject_to_covariance = {}
    full_matrix = None
    for key in subject_to_values.keys():
        if full_matrix is None:
            full_matrix = subject_to_values[key]
            subject_to_covariance[key] = empirical_covariance(subject_to_values[key])
            print(subject_to_means[key])
            print(subject_to_covariance[key])
        else:
            full_matrix = np.append(full_matrix, subject_to_values[key], axis=0)
            subject_to_covariance[key] = empirical_covariance(subject_to_values[key])
    full_mean = full_matrix.mean(axis=0)
    full_covariance = empirical_covariance(full_matrix)
    print(full_mean)
    print(full_covariance)
    return subject_to_covariance, full_covariance, full_mean
Author: ankurbpn | Project: CS-259D-HW | Lines: 19 | Source: question3.py
Example 13: test_covariance
def test_covariance():
    """Tests Covariance module on a simple dataset."""
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0)
    assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0)
    assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError, cov.error_norm, emp_cov, norm="foo")
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert np.amin(mahal_dist) > 0
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0)
    # test with one sample
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    with warnings.catch_warnings(record=True):
        cov.fit(X_1sample)
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
Author: mugiro | Project: elm-python | Lines: 43 | Source: test_covariance.py
Example 14: empirical_covariances
def empirical_covariances(subjects, assume_centered=False, standardize=False):
    """Compute empirical covariances for several signals.

    Parameters
    ----------
    subjects : list of numpy.ndarray, shape for each (n_samples, n_features)
        input subjects. Each subject is a 2D array, whose columns contain
        signals. Sample number can vary from subject to subject, but all
        subjects must have the same number of features (i.e. of columns).

    assume_centered : bool, optional
        if True, assume that all input signals are centered. This slightly
        decreases computation time by avoiding useless computation.

    standardize : bool, optional
        if True, set every signal variance to one before computing their
        covariance matrix (i.e. compute a correlation matrix).

    Returns
    -------
    emp_covs : numpy.ndarray, shape (n_features, n_features, n_subjects)
        empirical covariances.

    n_samples : numpy.ndarray, shape (n_subjects,)
        number of samples for each subject. dtype is float.
    """
    if not hasattr(subjects, "__iter__"):
        raise ValueError("'subjects' input argument must be an iterable. "
                         "You provided {0}".format(subjects.__class__))

    n_subjects = [s.shape[1] for s in subjects]
    if len(set(n_subjects)) > 1:
        raise ValueError("All subjects must have the same number of "
                         "features.\nYou provided: {0}".format(str(n_subjects)))

    n_subjects = len(subjects)
    n_features = subjects[0].shape[1]

    # Enable to change dtype here because depending on user, conversion from
    # single precision to double will be required or not.
    emp_covs = np.empty((n_features, n_features, n_subjects), order="F")
    for k, s in enumerate(subjects):
        if standardize:
            s = s / s.std(axis=0)  # copy on purpose
        M = empirical_covariance(s, assume_centered=assume_centered)
        # Force matrix symmetry, for numerical stability
        # of _group_sparse_covariance
        emp_covs[..., k] = M + M.T
    emp_covs /= 2

    n_samples = np.asarray([s.shape[0] for s in subjects], dtype=float)

    return emp_covs, n_samples
Author: demianw | Project: nilearn | Lines: 54 | Source: group_sparse_covariance.py
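The M + M.T accumulation followed by the final division by 2 is a symmetrization pass: it removes the tiny floating-point asymmetry an estimated covariance can carry, without changing its value. A minimal sketch with a made-up matrix:

import numpy as np

M = np.array([[1.0, 2.0 + 1e-12], [2.0, 3.0]])
print((M == M.T).all())          # False: not exactly symmetric
M_sym = (M + M.T) / 2
print((M_sym == M_sym.T).all())  # True: exactly symmetric now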
Example 15: feat_select
def feat_select(f):
    cp = load(read)
    (X, y, t) = cp.export_data(f)
    data = numpy.c_[X, y]
    cov = empirical_covariance(data, False)
    print(cov)
    for i in range(cov.shape[0] - 1):
        print(cov[i, -1])
Author: mtthss | Project: experiments-in-summarization | Lines: 12 | Source: test.py
Example 16: test_graph_lasso_2D
def test_graph_lasso_2D():
    # Hard-coded solution from Python skggm package
    # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
    cov_skggm = np.array([[3.09550269, 1.186972],
                          [1.186972, 0.57713289]])
    icov_skggm = np.array([[1.52836773, -3.14334831],
                           [-3.14334831, 8.19753385]])
    X = datasets.load_iris().data[:, 2:]
    emp_cov = empirical_covariance(X)
    for method in ('cd', 'lars'):
        cov, icov = graphical_lasso(emp_cov, alpha=.1, return_costs=False,
                                    mode=method)
        assert_array_almost_equal(cov, cov_skggm)
        assert_array_almost_equal(icov, icov_skggm)
Author: amueller | Project: scikit-learn | Lines: 15 | Source: test_graphical_lasso.py
Example 17: train
def train(self, use_entropy=False):
    """Train the classifier for all the models that it knows."""
    if len(self.dict_categories) < 2:
        print("At least two categories are needed for training...")
        print("Training is skipped.")
        return
    (X, Y, W) = self._get_example_matrix(use_entropy)
    if (hasattr(self.classifier, 'metric') and
            self.classifier.metric == 'mahalanobis'):
        # The mahalanobis distance needs the covariance of the data
        cov = covariance.empirical_covariance(X)
        self.classifier.metric_kwds['V'] = cov
    print("Training with {} categories and {} views.".format(
        len(self.dict_categories), len(Y)))
    print(self.classifier.fit(X, Y))
Author: julfla | Project: master_project | Lines: 15 | Source: models.py
Example 18: plot_all
def plot_all(X):
    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)

    # ----------------------------------------------------------------------
    # Pre-processing
    print("t-SNE Scaling")
    X_scaled = preprocessing.scale(X)  # zero mean, unit variance
    X_tsne_scaled = tsne.fit_transform(X_scaled)

    # normalize the data (scaling individual samples to have unit norm)
    print("t-SNE L2 Norm")
    X_normalized = preprocessing.normalize(X, norm='l2')
    X_tsne_norm = tsne.fit_transform(X_normalized)

    # whiten the data
    print("t-SNE Whitening")
    # The mean computed by the scaler is for the feature dimension; we want
    # the normalization to be in the feature dimension. Zero mean for each
    # sample assumes stationarity, which is not necessarily true for CNN
    # features. X: NxD, where N is the number of examples and D the number
    # of features.
    # scaler = preprocessing.StandardScaler(with_std=False).fit(X)
    scaler = preprocessing.StandardScaler().fit(X)  # scales each feature to unit std-dev
    X_centered = scaler.transform(X)
    # U, s, Vh = linalg.svd(X_centered)
    shapeX = X_centered.shape
    IPython.embed()  # interactive debugging breakpoint left in by the author

    # sig is a DxD matrix, where D is the feature dimension.
    # Still to figure out: the computation is not a problem, but carrying
    # around a 50k x 50k matrix is a memory killer!
    sig = (1 / shapeX[0]) * np.dot(X_centered.T, X_centered)
    sig2 = covariance.empirical_covariance(X_centered, assume_centered=True)  # estimated -- this is better
    sig3, shrinkage = covariance.oas(X_centered, assume_centered=True)        # estimated
    U, s, Vh = linalg.svd(sig, full_matrices=False)
    eps = 1e-2  # this affects how many low-frequency eigenvalues are eliminated
    invS = np.diag(np.reciprocal(np.sqrt(s + eps)))

    # PCA whitening
    X_pca = np.dot(invS, np.dot(U.T, X_centered))
    X_tsne_pca = tsne.fit_transform(X_pca)

    # whiten the data (ZCA)
    X_zca = np.dot(U, X_pca)
    X_tsne_zca = tsne.fit_transform(X_zca)

    return X_tsne_scaled, X_tsne_norm, X_tsne_pca, X_tsne_zca
Author: adithyamurali | Project: C3D | Lines: 47 | Source: tsne.py
Example 19: test_graphical_lasso
def test_graphical_lasso(random_state=0):
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)

    for alpha in (0., .1, .25):
        covs = dict()
        icovs = dict()
        for method in ('cd', 'lars'):
            cov_, icov_, costs = graphical_lasso(emp_cov, return_costs=True,
                                                 alpha=alpha, mode=method)
            covs[method] = cov_
            icovs[method] = icov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease (doesn't hold if alpha == 0)
            if not alpha == 0:
                assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
        assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)

    # Smoke test the estimator
    model = GraphicalLasso(alpha=.25).fit(X)
    model.score(X)
    assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
    assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)

    # For a centered matrix, assume_centered could be chosen True or False
    # Check that this returns indeed the same result for centered data
    Z = X - X.mean(0)
    precs = list()
    for assume_centered in (False, True):
        prec_ = GraphicalLasso(
            assume_centered=assume_centered).fit(Z).precision_
        precs.append(prec_)
    assert_array_almost_equal(precs[0], precs[1])
Example 20: _naive_ledoit_wolf_shrinkage
def _naive_ledoit_wolf_shrinkage(X):
    # A simple implementation of the formulas from Ledoit & Wolf
    # The computation below achieves the following computations of the
    # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    # Large-Dimensional Covariance Matrices"
    # beta and delta are given in the beginning of section 3.2
    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=False)
    mu = np.trace(emp_cov) / n_features
    delta_ = emp_cov.copy()
    delta_.flat[::n_features + 1] -= mu
    delta = (delta_ ** 2).sum() / n_features
    X2 = X ** 2
    beta_ = 1. / (n_features * n_samples) \
        * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov ** 2)
    beta = min(beta_, delta)
    shrinkage = beta / delta
    return shrinkage
Author: AlexisMignon | Project: scikit-learn | Lines: 20 | Source: test_covariance.py
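As a hedged usage sketch, the naive formula above can be cross-checked against scikit-learn's built-in Ledoit-Wolf estimate. The naive beta term uses the raw X rather than centered data, so we center first; the two estimates are then expected to agree up to floating-point tolerance (this sketch assumes _naive_ledoit_wolf_shrinkage from the example above is in scope).

import numpy as np
from sklearn.covariance import ledoit_wolf

rng = np.random.RandomState(0)
X = rng.randn(40, 5)
X = X - X.mean(axis=0)  # center, since the naive beta term uses raw X
_, shrinkage = ledoit_wolf(X)
print(np.isclose(shrinkage, _naive_ledoit_wolf_shrinkage(X)))  # expected: True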
Note: The sklearn.covariance.empirical_covariance examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution and use are governed by each project's license. Please do not republish without permission.