This article collects typical usage examples of the numpy.cov function in Python. If you have been wondering what cov does, how to call it, or what it looks like in real code, the curated examples below may help.
The following 20 code examples of cov are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
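Before the project excerpts, a minimal standalone sketch of the numpy.cov call itself may help; the array values here are made up purely for illustration:

import numpy as np

# three observations of two variables, laid out one observation per row
data = np.array([[1.0, 2.0],
                 [2.0, 4.1],
                 [3.0, 6.2]])

# rowvar=0 (or rowvar=False) treats columns as variables and rows as observations
C_sample = np.cov(data, rowvar=0)         # unbiased: divides by N - 1
C_pop = np.cov(data, rowvar=0, bias=1)    # biased: divides by N

print(C_sample.shape)   # (2, 2) covariance matrix of the two columns
print(np.diag(C_pop))   # per-column population variances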
Example 1: moments
def moments(self):
    """Calculate covariance and correlation matrices,
    trait, genotipic and ontogenetic means"""
    zs = np.array([ind["z"] for ind in self.pop])
    xs = np.array([ind["x"] for ind in self.pop])
    ys = np.array([ind["y"] for ind in self.pop])
    bs = np.array([ind["b"] for ind in self.pop])
    ymean = ys.mean(axis=0)
    zmean = zs.mean(axis=0)
    xmean = xs.mean(axis=0)
    bmean = bs.mean(axis=0)
    phenotipic = np.cov(zs, rowvar=0, bias=1)
    genetic = np.cov(xs, rowvar=0, bias=1)
    heridability = genetic[np.diag_indices_from(genetic)] / phenotipic[np.diag_indices_from(phenotipic)]
    corr_phenotipic = np.corrcoef(zs, rowvar=0, bias=1)
    corr_genetic = np.corrcoef(xs, rowvar=0, bias=1)
    avgP = avg_ratio(corr_phenotipic, self.modules)
    avgG = avg_ratio(corr_genetic, self.modules)
    return {
        "y.mean": ymean,
        "b.mean": bmean,
        "z.mean": zmean,
        "x.mean": xmean,
        "P": phenotipic,
        "G": genetic,
        "h2": heridability,
        "avgP": avgP,
        "avgG": avgG,
        "corrP": corr_phenotipic,
        "corrG": corr_genetic,
    }
Author: lem-usp | Project: evomod | Lines of code: 32 | Source file: pop.py
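Stripped of the project-specific helpers (self.pop, avg_ratio and the correlation averaging), the numpy.cov pattern in this excerpt reduces to the sketch below; the random stand-in data and seed are invented for illustration:

import numpy as np

rng = np.random.RandomState(0)
zs = rng.normal(size=(100, 3))   # stand-in phenotypic values, one row per individual
xs = rng.normal(size=(100, 3))   # stand-in genotypic values

P = np.cov(zs, rowvar=0, bias=1)   # population (biased) phenotypic covariance
G = np.cov(xs, rowvar=0, bias=1)   # population (biased) genotypic covariance

# ratio of the diagonals, the heritability-style quantity built in the excerpt
h2 = G[np.diag_indices_from(G)] / P[np.diag_indices_from(P)]
print(h2)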
Example 2: wprp_split
def wprp_split(gals, red_split, box_size, cols=['ssfr', 'pred'], jack_nside=3,
               rpmin=0.1, rpmax=20.0, Nrp=25):  # for 2 splits
    """
    Calculates the 2PCF of gals binned by sSFR, separated by red_split.
    Note that sSFR can be substituted in _cols_ to bin by, say, concentration.
    Accepts:
        gals - numpy array with objects, their positions, and attributes
        red_split - value which separates two populations
        box_size - box_size of the objects in gals
        cols - tags to specify the actual and predicted distribution. Defaults
               to ['ssfr', 'pred'], but could be modified to use, say,
               ['c', 'pred_c'] (assuming they exist in gals).
    Returns:
        [r, [actual], [pred], [err], [chi2]]
        r - centers of r bins
        [actual] - clustering of red/blue galaxies
        [pred] - clustering of predicted red/blue galaxies
        [err] - errorbars for red/blue galaxies
        [chi2] - goodness of fit for red/blue galaxies
    """
    r, rbins = make_r_scale(rpmin, rpmax, Nrp)
    n_jack = jack_nside ** 2
    results = []
    results.append(r)
    r_jack = []
    b_jack = []
    for col in cols:
        red = gals[gals[col] < red_split]
        blue = gals[gals[col] > red_split]
        r = calculate_xi(red, box_size, True, jack_nside, rpmin, rpmax, Nrp)
        b = calculate_xi(blue, box_size, True, jack_nside, rpmin, rpmax, Nrp)
        results.append([r[0], b[0]])
        if jack_nside <= 1:
            r_var = r[1]
            b_var = b[1]
        else:
            r_jack.append(r[2])
            b_jack.append(b[2])
    if jack_nside > 1:
        r_cov = np.cov(r_jack[0] - r_jack[1], rowvar=0, bias=1) * (n_jack - 1)
        b_cov = np.cov(b_jack[0] - b_jack[1], rowvar=0, bias=1) * (n_jack - 1)
        r_var = np.sqrt(np.diag(r_cov))
        b_var = np.sqrt(np.diag(b_cov))
    results.append([r_var, b_var])
    if jack_nside > 1:
        r_chi2 = calculate_chi_square(results[1][0], results[2][0], r_cov)
        b_chi2 = calculate_chi_square(results[1][1], results[2][1], b_cov)
        print "Goodness of fit for the red (lo) and blue (hi): ", r_chi2, b_chi2
    else:
        d_r = results[1][0] - results[2][0]
        d_b = results[1][1] - results[2][1]
        r_chi2 = d_r**2/np.sqrt(r_var[0]**2 + r_var[1]**2)
        b_chi2 = d_b**2/np.sqrt(b_var[0]**2 + b_var[1]**2)
    results.append([r_chi2, b_chi2])
    return results
Author: vipasu | Project: addseds | Lines of code: 60 | Source file: calc.py
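The error bars above come from np.cov over resampled curves; a reduced sketch of that step with fabricated jackknife samples (calculate_xi and the rest of the project machinery are not reproduced):

import numpy as np

# pretend jackknife output: n_jack resampled measurements of a curve with n_bins points
n_jack, n_bins = 9, 25
rng = np.random.RandomState(1)
samples = rng.normal(loc=1.0, scale=0.1, size=(n_jack, n_bins))

# rows are jackknife realizations (observations), columns are r bins (variables),
# hence rowvar=0; the (n_jack - 1) factor is the usual jackknife covariance scaling
cov = np.cov(samples, rowvar=0, bias=1) * (n_jack - 1)
err = np.sqrt(np.diag(cov))
print(err.shape)   # (25,) one error bar per bin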
Example 3: test_2d_wo_missing
def test_2d_wo_missing(self):
    # Test cov on 1 2D variable w/o missing values
    x = self.data.reshape(3, 4)
    assert_almost_equal(np.cov(x), cov(x))
    assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
    assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                        cov(x, rowvar=False, bias=True))
Author: SylvainCorlay | Project: numpy | Lines of code: 7 | Source file: test_extras.py
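For context, the unqualified cov being compared against np.cov in this test appears to be the masked-array version from numpy.ma; a small sketch of how np.ma.cov treats masked entries, with made-up values:

import numpy as np
import numpy.ma as ma

x = ma.array([1.0, 2.0, 3.0, 4.0], mask=[0, 0, 1, 0])

print(np.ma.cov(x))             # uses only the unmasked values
print(np.cov(x.compressed()))   # same result computed on the plain, unmasked values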
Example 4: testComponentSeparation
def testComponentSeparation(self):
    A = generate_covsig([[10,5,2],[5,10,2],[2,2,10]], 500)
    B = generate_covsig([[10,2,2],[2,10,5],[2,5,10]], 500)
    X = np.dstack([A,B])
    W, V = csp(X,[1,2])
    C1a = np.cov(X[:,:,0].dot(W).T)
    C2a = np.cov(X[:,:,1].dot(W).T)
    Y = np.dstack([B,A])
    W, V = csp(Y,[1,2])
    C1b = np.cov(Y[:,:,0].dot(W).T)
    C2b = np.cov(Y[:,:,1].dot(W).T)
    # check symmetric case
    self.assertTrue(np.allclose(C1a.diagonal(), C2a.diagonal()[::-1]))
    self.assertTrue(np.allclose(C1b.diagonal(), C2b.diagonal()[::-1]))
    # swapping class labels (or in this case, trials) should not change the result
    self.assertTrue(np.allclose(C1a, C1b))
    self.assertTrue(np.allclose(C2a, C2b))
    # variance of first component should be greatest for class 1
    self.assertTrue(C1a[0,0] > C2a[0,0])
    # variance of last component should be greatest for class 2
    self.assertTrue(C1a[2,2] < C2a[2,2])
    # variance of central component should be equal for both classes
    self.assertTrue(np.allclose(C1a[1,1], C2a[1,1]))
Author: dongqunxi | Project: SCoT | Lines of code: 30 | Source file: test_csp.py
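The .T in the np.cov calls above follows from numpy's default rowvar=True convention (each row is a variable); a minimal sketch with random stand-in data:

import numpy as np

rng = np.random.RandomState(2)
samples = rng.normal(size=(500, 3))   # 500 time samples of 3 channels

# by default np.cov expects variables in rows, so (samples, channels) data is transposed;
# the two calls below produce the same 3x3 matrix
C1 = np.cov(samples.T)
C2 = np.cov(samples, rowvar=0)
print(np.allclose(C1, C2))   # True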
Example 5: test_pairwise_distances_data_derived_params
def test_pairwise_distances_data_derived_params(n_jobs, metric, dist_function,
                                                y_is_x):
    # check that pairwise_distances give the same result in sequential and
    # parallel, when metric has data-derived parameters.
    with config_context(working_memory=1):  # to have more than 1 chunk
        rng = np.random.RandomState(0)
        X = rng.random_sample((1000, 10))
        if y_is_x:
            Y = X
            expected_dist_default_params = squareform(pdist(X, metric=metric))
            if metric == "seuclidean":
                params = {'V': np.var(X, axis=0, ddof=1)}
            else:
                params = {'VI': np.linalg.inv(np.cov(X.T)).T}
        else:
            Y = rng.random_sample((1000, 10))
            expected_dist_default_params = cdist(X, Y, metric=metric)
            if metric == "seuclidean":
                params = {'V': np.var(np.vstack([X, Y]), axis=0, ddof=1)}
            else:
                params = {'VI': np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T}
        expected_dist_explicit_params = cdist(X, Y, metric=metric, **params)
        dist = np.vstack(tuple(dist_function(X, Y,
                                             metric=metric, n_jobs=n_jobs)))
        assert_allclose(dist, expected_dist_explicit_params)
        assert_allclose(dist, expected_dist_default_params)
Author: scikit-learn | Project: scikit-learn | Lines of code: 29 | Source file: test_pairwise.py
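The 'VI' parameter built above is the inverse covariance matrix required by the Mahalanobis metric; a self-contained sketch using scipy's cdist directly (the array sizes and seed are arbitrary):

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.RandomState(3)
X = rng.random_sample((50, 4))

# Mahalanobis distance needs the inverse covariance of the data;
# np.cov(X.T) treats the 4 columns of X as the variables
VI = np.linalg.inv(np.cov(X.T))
D = cdist(X, X, metric='mahalanobis', VI=VI)
print(D.shape)   # (50, 50)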
Example 6: kal0
def kal0(x,sv=None,Kdisp=1.0,Nsamp=1000,L=5,Norder=3,pg=1.0,vg=1.0,
         sigma0=1000,N0=200,Prange=8):
    x = x.T
    # Time scale
    if sv is None:
        mux = x-mean(x,0)
        phi = unwrap(angle(mux[:,0]+1j*mux[:,1]))
        sv = 2*pi*x.shape[0]/abs(phi[-1]-phi[0])
    # System matrix
    A = Kdisp*eye(2*x.shape[1])
    A[:x.shape[1],x.shape[1]:2*x.shape[1]] = eye(x.shape[1])/sv
    # Observation matrix
    C = zeros((x.shape[1],2*x.shape[1]))
    C[:x.shape[1],:x.shape[1]] = eye(x.shape[1])
    # Observation covariance
    R = cov((x[:-1]-x[1:]).T)/sqrt(2.0)
    # System covariance
    idx = random.randint(x.shape[0]-5,size=(Nsamp))
    idx = vstack([idx+i for i in xrange(L)])
    tx = x[idx].reshape(idx.shape[0],-1)
    P = array([[(i-(L-1)/2)**j for i in xrange(L)] for j in xrange(Norder)])
    K = lstsq(P.T,tx)[0]
    s = (cov((tx-dot(P[:-1].T,K[:-1]))[1])-cov((tx-dot(P.T,K))[1]))/cov((tx-dot(P[:-1].T,K[:-1]))[1])
    D = zeros_like(A)
    D[:x.shape[1],:x.shape[1]] = R*pg
    D[x.shape[1]:,x.shape[1]:] = R*vg
    Q = D*s
    return(Kalman(A,C,Q,R))
Author: BIRDSLab | Project: temporal1form | Lines of code: 31 | Source file: kalman.py
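The observation-noise estimate R in this excerpt is built from the covariance of first differences of the signal; a rough sketch of the underlying idea with synthetic white noise (the scale and sizes are invented):

import numpy as np

rng = np.random.RandomState(11)
noise = rng.normal(scale=0.5, size=(1000, 2))   # white observation noise, 2 channels

# the covariance of successive differences of white noise is roughly twice the
# noise covariance, which is why a difference-based estimate needs rescaling
diff_cov = np.cov((noise[:-1] - noise[1:]).T)
print(np.diag(diff_cov))          # roughly 2 * 0.25 = 0.5 on the diagonal
print(np.diag(np.cov(noise.T)))   # roughly 0.25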
Example 7: get_features
def get_features(data):
    X = [d[0] for d in data]
    Y = [d[1] for d in data]
    Z = [d[2] for d in data]
    x_mean = np.mean(X)
    y_mean = np.mean(Y)
    z_mean = np.mean(Z)
    x_var = np.var(X)
    y_var = np.var(Y)
    z_var = np.var(Z)
    mean_magnitude = np.mean([math.sqrt(x*x + y*y + z*z) for (x,y,z) in izip(X,Y,Z)])
    magnitude_mean = math.sqrt(x_mean*x_mean + y_mean*y_mean + z_mean*z_mean)
    sma = np.mean([math.fabs(x) + math.fabs(y) + math.fabs(z) for (x,y,z) in izip(X,Y,Z)])
    corr_xy = (np.cov(X,Y) / (math.sqrt(x_var) * math.sqrt(y_var)))[0][1]
    corr_yz = (np.cov(Y,Z) / (math.sqrt(z_var) * math.sqrt(y_var)))[0][1]
    corr_xz = (np.cov(Z,X) / (math.sqrt(x_var) * math.sqrt(z_var)))[0][1]
    vector_d = [(x - x_mean, y - y_mean, z - z_mean) for (x,y,z) in izip(X,Y,Z)]
    vector_v = [x_mean, y_mean, z_mean]
    vector_p = [np.multiply((np.dot(d, vector_v)/np.dot(vector_v, vector_v)), vector_v) for d in vector_d]
    vector_h = [np.subtract(d, p) for d, p in izip(vector_d, vector_p)]
    mod_vector_p = [np.linalg.norm(p) for p in vector_p]
    mod_vector_h = [np.linalg.norm(h) for h in vector_h]
    cor_p_h = (np.cov(mod_vector_h,mod_vector_p) / (math.sqrt(np.var(mod_vector_h)) * math.sqrt(np.var(mod_vector_p))))[0][1]
    vector_p = np.mean(vector_p, axis=0)
    vector_h = np.mean(vector_h, axis=0)
    mod_vector_p = np.mean(mod_vector_p)
    mod_vector_h = np.mean(mod_vector_h)
    ret = [x_mean, y_mean, z_mean, x_var, y_var, z_var, mean_magnitude, magnitude_mean, sma, corr_xy, corr_yz, corr_xz, cor_p_h, mod_vector_p, mod_vector_h]
    ret.extend([x for x in vector_p])
    ret.extend([x for x in vector_h])
    return ret
Author: siddharthsarda | Project: spams | Lines of code: 34 | Source file: extract_motion_features.py
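The correlation features above divide a np.cov entry by np.var-based standard deviations; note that np.cov defaults to an N-1 normalization while np.var defaults to N, so a consistent alternative is np.corrcoef. A small sketch with synthetic data:

import numpy as np

rng = np.random.RandomState(4)
x = rng.normal(size=200)
y = 0.5 * x + rng.normal(scale=0.1, size=200)

# with bias=1, np.cov uses the same N normalization as np.var's default,
# and the manual ratio then matches the Pearson correlation from np.corrcoef
r_direct = np.corrcoef(x, y)[0, 1]
r_manual = np.cov(x, y, bias=1)[0, 1] / (np.sqrt(np.var(x)) * np.sqrt(np.var(y)))
print(np.allclose(r_direct, r_manual))   # True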
Example 8: cov_estimation
def cov_estimation(list_of_recarrays, index_name, pair_wise=False):
    def get_the_other_name(rec, index_name):
        assert len(rec.dtype.names) == 2
        name = [nm for nm in rec.dtype.names if nm != index_name]
        assert len(name) == 1
        return name[0]
    for array in list_of_recarrays:
        array[get_the_other_name(array, index_name)] = winsorize(array[get_the_other_name(array, index_name)], 99)
    nn = len(list_of_recarrays)
    if not pair_wise:
        new_rec = list_of_recarrays[0]
        for ii in range(1, nn):
            new_rec = rec_join(index_name, new_rec, list_of_recarrays[ii], jointype='inner', defaults=None, r1postfix='', r2postfix=str(ii+1))
        dat_mat = np.c_[[new_rec[nm] for nm in new_rec.dtype.names if nm != index_name]]
        covmat = np.cov(dat_mat)
    else:
        covmat = np.zeros((nn, nn))
        for ii in range(0, nn):
            covmat[ii,ii] = list_of_recarrays[ii][get_the_other_name(list_of_recarrays[ii], index_name)].var()
            for jj in range(ii+1, nn):
                new_rec = rec_join(index_name, list_of_recarrays[ii], list_of_recarrays[jj], jointype='inner', defaults=None, r1postfix='1', r2postfix='2')
                dat_mat = np.c_[[new_rec[nm] for nm in new_rec.dtype.names if nm != index_name]]
                tmp_cov = np.cov(dat_mat)[0,1]
                covmat[ii,jj] = tmp_cov
                covmat[jj,ii] = tmp_cov
    return covmat
Author: anlovescat | Project: scratch_space1 | Lines of code: 26 | Source file: mean_variance.py
Example 9: ldaTransform
def ldaTransform(data):
    C0 = data[data[:, -1] == -1]
    C1 = data[data[:, -1] == 1]
    C0 = C0[:, :-1]
    C1 = C1[:, :-1]
    S0 = np.cov(np.transpose(C0))
    S1 = np.cov(np.transpose(C1))
    SW = S0 + S1
    Mu0 = np.mean(C0, axis = 0)
    Mu1 = np.mean(C1, axis = 0)
    Mu = np.mean(data, axis = 0)
    Mu = Mu[:-1]
    Mu = np.matrix(Mu)
    Mu0 = np.matrix(Mu0)
    Mu1 = np.matrix(Mu1)
    SB = C0.shape[0] * np.transpose(Mu0 - Mu) * (Mu0 - Mu) + C1.shape[0] * np.transpose(Mu1 - Mu) * (Mu1 - Mu)
    Swin = LA.pinv(SW)  # costly
    Swin = np.matrix(Swin)
    SwinSB = Swin * SB  # costly
    e, v = LA.eig(SwinSB)  # costly
    s = np.argsort(e)[::-1]
    v = np.array(v)
    ev = np.zeros(v.shape)
    for i in xrange(e.shape[0]):
        ev[:, i] = v[:, s[i]]
    w = ev[:, 0]
    w = np.matrix(w)
    return w
Author: abhinavmoudgil95 | Project: smai-coursework | Lines of code: 28 | Source file: ldasvm.py
Example 10: fit
def fit(self, data, chunks):
    """Learn the RCA model.
    Parameters
    ----------
    data : (n x d) data matrix
        Each row corresponds to a single instance
    chunks : (n,) array of ints
        When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
        When ``chunks[i] == j``, point i belongs to chunklet j.
    """
    data, M_pca = self._process_data(data)
    chunks = np.asanyarray(chunks, dtype=int)
    chunk_mask, chunked_data = _chunk_mean_centering(data, chunks)
    inner_cov = np.cov(chunked_data, rowvar=0, bias=1)
    dim = self._check_dimension(np.linalg.matrix_rank(inner_cov))
    # Fisher Linear Discriminant projection
    if dim < data.shape[1]:
        total_cov = np.cov(data[chunk_mask], rowvar=0)
        tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
        vals, vecs = np.linalg.eig(tmp)
        inds = np.argsort(vals)[:dim]
        A = vecs[:, inds]
        inner_cov = A.T.dot(inner_cov).dot(A)
        self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
    else:
        self.transformer_ = _inv_sqrtm(inner_cov).T
    if M_pca is not None:
        self.transformer_ = self.transformer_.dot(M_pca)
    return self
Author: svecon | Project: metric-learn | Lines of code: 35 | Source file: rca.py
Example 11: get_projection
def get_projection():
    # get the matrix for the raw data
    cla0_matri = np.asmatrix(cla_0)
    cla1_matri = np.asmatrix(cla_1)
    # compute the mean for each class
    # select the 8 features
    mu_0 = (cla0_matri.transpose()[:8]).mean(1)
    mu_1 = (cla1_matri.transpose()[:8]).mean(1)
    # print mu_0, mu_1
    # compute the covariance matrix for each class
    cov_0 = np.asmatrix(np.cov(cla0_matri.transpose()[:8]))
    cov_1 = np.asmatrix(np.cov(cla1_matri.transpose()[:8]))
    # compute the scatter matrices s0 and s1 for each class
    s_0 = np.dot((len(cla0_matri)-1), cov_0)
    s_1 = np.dot((len(cla1_matri)-1), cov_1)
    # compute the within-class scatter
    s_w = np.add(s_0, s_1)
    # compute the inverse of the within-class scatter
    inv_s = np.linalg.inv(s_w)
    # get the final optimal line direction v
    dir_v = np.matrix.dot(inv_s, np.subtract(mu_0, mu_1))
    print dir_v
    # get the projection for the whole data set
    proj_data = np.matrix.dot(dir_v.transpose(), ((np.asmatrix(data_set)).transpose())[:8])
    proj_lis = (proj_data.tolist())[0]
    # append the excluded labels to the projected data
    for it in range(0, len(proj_lis)):
        temp_lis = []
        temp_lis.append(float(proj_lis[it]))
        # append the label
        temp_lis.append(int(data_set[it][8]))
        proj_data_set.append(temp_lis)
Author: boy0122 | Project: machine_learning | Lines of code: 32 | Source file: prb4.py
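The scatter matrices s_0 and s_1 above are recovered from np.cov by multiplying back the (n - 1) factor that np.cov divides out; a short sketch verifying the relation on random stand-in data:

import numpy as np

rng = np.random.RandomState(5)
cls = rng.normal(size=(40, 8))   # one class: 40 samples, 8 features

# np.cov divides the scatter matrix by (n - 1), so the within-class scatter
# used in Fisher LDA can be recovered by multiplying back
S_from_cov = (cls.shape[0] - 1) * np.cov(cls.T)
centered = cls - cls.mean(axis=0)
S_direct = centered.T.dot(centered)
print(np.allclose(S_from_cov, S_direct))   # True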
Example 12: run
def run(self,X):
    if self.covType == "diag":
        Sigma = np.diag(np.diag(np.cov(X.T)))
    elif self.covType == "full":
        Sigma = np.cov(X.T)
    else:
        print "error"
    self.mu = None
    self.labels = None
    n,p = X.shape
    mu,pi = self._initialize(X)
    iter = 0
    converge = False
    while iter < self.maxIter and not converge:
        old_mu = mu.copy()
        old_pi = pi.copy()
        gamma = self._estep(X, old_mu, Sigma, old_pi)
        mu,pi = self._mstep(X,gamma)
        if np.sum(abs(old_mu-mu))/np.sum(abs(old_mu)) < 0.001:
            converge = True
            print("GMM algorithm converges in "+str(iter+1)+" iterations")
        iter = iter + 1
    if iter == self.maxIter:
        print("GMM algorithm fails to converge in "+str(iter)+" iterations")
    labels = [np.argmax(g) for g in gamma]
    self.mu = mu
    self.labels = labels
Author: tonyzhang1231 | Project: Machine-Learning-CS6316-Assignments | Lines of code: 30 | Source file: gmmCluster.py
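The "diag" branch above keeps only the per-feature variances from the full covariance; a minimal sketch with random stand-in data:

import numpy as np

rng = np.random.RandomState(8)
X = rng.normal(size=(200, 3))   # 200 samples, 3 features

full_cov = np.cov(X.T)                    # full 3x3 covariance; columns of X are the variables
diag_cov = np.diag(np.diag(np.cov(X.T)))  # inner np.diag extracts the variances, outer np.diag rebuilds a diagonal matrix
print(full_cov.round(3))
print(diag_cov.round(3))                  # same diagonal, zeros everywhere else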
Example 13: stop_training
def stop_training(self, destroy_training_set = True):
    self.covariance = numpy.cov(self.training_set.T)
    self.mean = numpy.mean(self.training_set, axis=0)
    xy = self.training_set[:,-2:]
    self.xycovariance = numpy.cov(xy.T)
    self.xymean = numpy.mean(xy, axis=0)
    self.training_set = None
Author: maxikov | Project: attfocus | Lines of code: 7 | Source file: gauss_node.py
Example 14: covandcoef
def covandcoef(compare_data):
    hx = []
    hy = []
    ox = []
    oy = []
    tx = []
    ty = []
    for i in compare_data:
        hx.append(i[4])
        hy.append(i[7])
    for i in range(0,7):
        ox.append(compare_data[i][4])
        oy.append(compare_data[i][7])
    for i in range(0,89):
        tx.append(compare_data[i][4])
        ty.append(compare_data[i][7])
    X = np.vstack((hx,hy))
    Z = np.vstack((ox,oy))
    Y = np.vstack((tx,ty))
    return [[np.cov(X)[0][1],np.corrcoef(X)[0][1]],[np.cov(Y)[0][1],np.corrcoef(Y)[0][1]],[np.cov(Z)[0][1],np.corrcoef(Z)[0][1]]]
Author: t3abdulg | Project: Stock-Comparison | Lines of code: 31 | Source file: main.py
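The 2x2 matrices returned by np.cov and np.corrcoef above come from stacking two 1-D series; passing the second series as np.cov's y argument is equivalent, as this sketch with made-up data shows:

import numpy as np

rng = np.random.RandomState(7)
x = rng.normal(size=30)
y = rng.normal(size=30)

# stacking two 1-D series and passing the second series as the y argument
# are equivalent ways of getting the 2x2 cross-covariance matrix
C_stacked = np.cov(np.vstack((x, y)))
C_pair = np.cov(x, y)
print(np.allclose(C_stacked, C_pair))   # True
print(C_pair[0, 1])                     # the covariance between x and y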
Example 15: plt_1d
def plt_1d(class1, class2):
    prior1 = 0.5
    prior2 = 0.5
    mean1 = np.array([np.mean(class1[:, 0])])
    mean2 = np.array([np.mean(class2[:, 0])])
    # print mean1, mean2
    cov1 = np.array([[np.cov([class1[:, 0]])]])
    cov2 = np.array([[np.cov([class2[:, 0]])]])
    # print cov1, cov2
    discriminant_function1 = gdf.gen_discriminant_function_of_normal_distribution(mean1, cov1, prior1)
    discriminant_function2 = gdf.gen_discriminant_function_of_normal_distribution(mean2, cov2, prior2)
    # X = np.linspace(np.amin(class1[:, 0]), np.amax(class1[:, 0]), 200)
    X = np.linspace(-100, 100, 100)
    y1 = [discriminant_function1(np.array([x])) for x in X]
    y2 = [discriminant_function2(np.array([x])) for x in X]
    plt.plot(X, y1)
    plt.plot(X, y2)
    plt.show()
Author: stamaimer | Project: PatternClassification | Lines of code: 31 | Source file: plt_discriminant_function.py
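np.cov applied to a single 1-D sample, as in cov1 and cov2 above, simply returns the sample variance (with the default N-1 normalization) as a 0-d array; a quick sketch with invented values:

import numpy as np

rng = np.random.RandomState(10)
v = rng.normal(size=50)

# np.cov of one 1-D sample is just its ddof=1 variance, returned as a 0-d array
print(np.cov(v))
print(np.var(v, ddof=1))   # same number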
Example 16: dataNorm
def dataNorm(self):
    SXX = np.cov(self.X)
    U, l, Ut = LA.svd(SXX, full_matrices=True)
    H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
    self.nX = np.dot(H,self.X)
    #print np.cov(self.nX)
    #print "mean:"
    #print np.mean(self.nX)
    SYY = np.cov(self.Y)
    U, l, Ut = LA.svd(SYY, full_matrices=True)
    H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
    #print "H"
    #print H
    self.nY = np.dot(H,self.Y)
    #print np.cov(self.nY)
    print "dataNorm_X:"
    for i in range(len(self.nX)):
        print(self.nX[i])
    print("---")
    print "dataNorm_Y:"
    for i in range(len(self.nY)):
        print(self.nY[i])
    print("---")
Author: cvpapero | Project: canotest | Lines of code: 27 | Source file: cca4.py
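The H matrix above whitens the data so that the covariance of the transformed variables is (approximately) the identity; a numpy-only sketch of the same idea with random stand-in data (the excerpt's LA.sqrtm presumably comes from scipy.linalg, which is not needed here):

import numpy as np

rng = np.random.RandomState(6)
X = rng.normal(size=(3, 500))   # 3 variables in rows, 500 observations, as in the excerpt

# whitening sketch: build H so that the covariance of H @ X is (close to) the identity
S = np.cov(X)
U, l, Ut = np.linalg.svd(S)
H = np.diag(1.0 / np.sqrt(l)).dot(Ut)
nX = H.dot(X)
print(np.allclose(np.cov(nX), np.eye(3)))   # True up to floating point error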
Example 17: main
def main():
    fnm = 'prob3.data'
    data = md.read_data(fnm)
    D1 = data[0:8,].T
    D2 = data[8:,].T
    u1 = np.matrix((np.mean(D1[0,:]), np.mean(D1[1,:]))).T
    u2 = np.matrix((np.mean(D2[0,:]), np.mean(D2[1,:]))).T
    sigma1 = np.asmatrix(np.cov(D1, bias=1))
    sigma2 = np.asmatrix(np.cov(D1, bias=1))
    g1 = discrim_func(u1, sigma1)
    g2 = discrim_func(u2, sigma2)
    steps = 100
    x = np.linspace(-2,2,steps)
    y = np.linspace(-6,6,steps)
    X,Y = np.meshgrid(x,y)
    z = [g1(X[r,c], Y[r,c]) - g2(X[r,c], Y[r,c])
         for r in range(0,steps) for c in range(0,steps)]
    Z = np.array(z)
    px = X.ravel()
    py = Y.ravel()
    pz = Z.ravel()
    gridsize = 50
    plot = plt.subplot(111)
    plt.hexbin(px,py,C=pz, gridsize=gridsize, cmap=cm.jet, bins=None)
    cb = plt.colorbar()
    cb.set_label('g1 minus g2')
    return plot
Author: nail82 | Project: final_exam | Lines of code: 32 | Source file: problem3.py
Example 18: correlation
def correlation():
    df = pd.read_csv("dataset/train_new.csv")
    # df = df.dropna(axis=0,how="any")
    print df.describe()
    # print df.head()
    param = []
    correlation = []
    abs_corr = []
    covariance = []
    columns = ["Applicant_Gender","App_age","Applicant_Occupation","Applicant_Qualification","Manager_age","Manager_Status","Manager_Gender","Manager_Business","Manager_Business2","Manager_Num_Application"]
    for c in columns:
        # Check if binary or continuous
        if len(df[c].unique()) <= 12:
            corr = spearmanr(df['Business_Sourced'],df[c])[0]
            print "spear",c,corr
            y = df['Business_Sourced']
            x = df[c]
            X = np.vstack((y,x))
            covar = np.cov(X)
        else:
            corr = pointbiserialr(df['Business_Sourced'],df[c])[0]
            print "point",c,corr
            y = df['Business_Sourced']
            x = df[c]
            X = np.vstack((y,x))
            covar = np.cov(X)
        param.append(c)
        correlation.append(corr)
        abs_corr.append(abs(corr))
        # covariance.append(covar[0][1])
    print covariance
Author: ayush1997 | Project: AnalyticsVidhya_SmartRecruit | Lines of code: 32 | Source file: smart.py
Example 19: get_stats
def get_stats(arrs, interpolatenans=False):
    arrslen = len(arrs)
    if DEBUG_PRINT: print "array nums:", arrslen
    stats = [0] * arrslen
    for i, arr in enumerate(arrs):
        if(len(arrs[i].shape) > 2):
            stats[i] = None
        else:
            maskedarr = ma.masked_array(arrs[i], fill_value=0)
            if interpolatenans:
                arr = interpolate_nans(arr)
            else:
                arr = maskedarr.filled()  # check and see what happens when you interpolate
            stats[i] = {'avgs': [np.mean(arr, axis=0), np.mean(arr, axis=1)],
                        'stdevs': [np.std(arr, axis=0), np.std(arr, axis=1)],
                        'cov': [0, 0]}
            xlen = arr.shape[0]
            ylen = arr.shape[1]
            # get specific covariance values along x axis
            covx = np.zeros(xlen)
            covar = np.cov(arr)  # get the covariance values by row for dim 1
            for x in range(1, xlen):
                covx[x-1] = covar[x][x-1]
            stats[i]['cov'][0] = covx
            # get specific covariance values along y axis
            covy = np.zeros(ylen)
            covar = np.cov(arr, rowvar=0)  # get the covariance values by col for dim 2
            for y in range(1, ylen):
                covy[y-1] = covar[y][y-1]
            stats[i]['cov'][1] = covy
    return stats
Author: leibatt | Project: user_study | Lines of code: 32 | Source file: scidb_server_interface_numpy.py
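The loops above collect the covariance of each row (or column) with its neighbour, i.e. the first subdiagonal of the np.cov result; np.diag with k=-1 extracts the same values in one call, as this sketch with random stand-in data shows:

import numpy as np

rng = np.random.RandomState(9)
arr = rng.normal(size=(6, 100))

# np.cov(arr) treats each row as a variable; the first subdiagonal of the
# result holds the covariance of each row with the previous one
C = np.cov(arr)
adjacent = np.diag(C, k=-1)
print(adjacent.shape)   # (5,) one value per adjacent row pair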
Example 20: test_1d_wo_missing
def test_1d_wo_missing(self):
    "Test cov on 1D variable w/o missing values"
    x = self.data
    assert_almost_equal(np.cov(x), cov(x))
    assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
    assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                        cov(x, rowvar=False, bias=True))
Author: Alanchi | Project: numpy | Lines of code: 7 | Source file: test_extras.py
Note: the numpy.cov examples in this article were compiled by 纯净天空 from source and documentation hosting platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original owners; refer to each project's license before redistributing or reusing the code. Do not republish without permission.