This article collects typical usage examples of the scipy.linalg.svd function in Python. If you are wondering what scipy.linalg.svd does and how to use it, the curated examples below should help.
The following 20 code examples of the svd function are shown, sorted by popularity by default.
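Before the collected examples, here is a minimal, self-contained sketch of the basic call; it is not taken from any of the projects below, and the matrix A and its shape are arbitrary illustrative choices.
import numpy as np
from scipy import linalg

A = np.random.randn(5, 3)                      # arbitrary example matrix
U, s, Vh = linalg.svd(A, full_matrices=False)  # thin SVD: U (5, 3), s (3,), Vh (3, 3)
A_rec = U @ np.diag(s) @ Vh                    # s is a 1-D array, so expand it with np.diag
print(np.allclose(A, A_rec))                   # True up to floating-point error
Note that scipy.linalg.svd returns the singular values as a 1-D array rather than a diagonal matrix, which is why several of the examples below wrap them in np.diag (or an equivalent) before multiplying.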
Example 1: normalise_site_svd
def normalise_site_svd(self, site, left=True):
'''
Normalise the MP at a given site. Use the SVD method,
arXiv:1008.3477, sec. 4.1.3
'''
if (left and site == self.L) or (not left and site == 1):  # self.L assumed to be the chain length
self.normalise_extremal_site(site, left=left)
return
M_asb = self.mps[site] # the matrix we have to work on, M_site
r, s, c = M_asb.shape # row, spin, column
if left:
M_aab = M_asb.reshape((r * s, c)) # merge first two indices
U, S, Vh = la.svd(M_aab, full_matrices=False)
self.mps[site] = U.reshape((r, s, U.size // (r * s))) # new A_site
# Contract over index b: (diag(S) @ Vh)_ab (X) (M_site+1)_bsc
SVh = np.dot(np.diag(S), Vh)  # S is a 1-D array of singular values
self.mps[site+1] = np.tensordot(SVh, self.mps[site+1], ((1),(0)))
else:
M_aab = M_asb.reshape((r, s * c)) # merge last two indices
U, S, Vh = la.svd(M_aab, full_matrices=False)
self.mps[site] = Vh.reshape((Vh.size // (s * c), s, c)) # new B
# Contract over index b: (M_site-1)_asb (X) (U @ diag(S))_bc
US = np.dot(U, np.diag(S))  # S is a 1-D array of singular values
self.mps[site-1] = np.tensordot(self.mps[site-1], US, ((2),(0)))
return
Developer: japs, Project: mpys, Lines: 30, Source: libmps.py
Example 2: mixed_canonicalise
def mixed_canonicalise(self, site):
'''
Put the MPS in mixed canonical form.
Sites 1...site are left-normalised, sites site...L are
right-normalised. The singular values are stored in self.S and
returned.
'''
if site > self.left_normalised:
# gotta left-normalise sites left_normalised...site
# left qr up to site-1
for _s in range(self.left_normalised+1, site):
self.normalise_site_qr(_s, left=True)
# svd at site
Msite = self.mps[site]
r, s, c = Msite.shape
Msite = Msite.reshape((r * s, c))
U, self.S, Vh = la.svd(Msite, full_matrices=False)
self.mps[site] = U.reshape((r, s, U.size // (r * s))) # new A_site
self.mps[site+1] = np.tensordot(Vh, self.mps[site+1], ((1),(0)))
else:
# gotta right-normalise sites right_normalised...site+1
# right qr up to site+2
for _s in range(self.right_normalised, site+1):
self.normalise_site_qr(_s, left=False)
# svd at site+1
Msite = self.mps[site+1]
r, s, c = Msite.shape
Msite = Msite.reshape((r, s * c))
U, self.S, Vh = la.svd(Msite, full_matrices=False)
self.mps[site+1] = Vh.reshape((Vh.size // (s * c), s, c))  # new B
self.mps[site] = np.tensordot(self.mps[site], U, ((2),(0)))  # contract the right bond index with U
return self.S
Developer: japs, Project: mpys, Lines: 34, Source: libmps.py
Example 3: ideal_data
def ideal_data(num, dimU, dimY, dimX, noise=1):
"""Linear system data"""
# generate randomized linear system matrices
A = randn(dimX, dimX)
B = randn(dimX, dimU)
C = randn(dimY, dimX)
D = randn(dimY, dimU)
# make sure state evolution is stable
U, S, V = svd(A)
A = dot(U, dot(diag(S / max(S)), V))
U, S, V = svd(B)
S2 = zeros((size(U,1), size(V,0)))
S2[:len(S), :len(S)] = diag(S / max(S))  # place the scaled singular values on the diagonal
B = dot(U, dot(S2, V))
# random input
U = randn(num, dimU)
# initial state
X = reshape(randn(dimX), (1,-1))
# initial output
Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1,-1))
# generate next state
X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1,-1))))
# and so forth
for u in U[1:]:
Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1,-1))))
X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1,-1))))
return U, Y + randn(num, dimY) * noise
Developer: riscy, Project: mllm, Lines: 34, Source: system_identifier.py
Example 4: randomized_svd
def randomized_svd(A, rank=None, power_iter=2, k=50, p=50, block_krylov=True, l=0, use_id=False):
"""
:param rank: If None, adaptive range finder is used to detect the rank.
:param power_iter: Number of power iterations.
:param k: Initial estimate of rank, if adaptive range finder is used.
:param p: Batch size in the incremental steps of adaptive range finder.
:param l: Number of overestimated columns in computing Q.
"""
if rank is None:
Q = adaptive_range(A, k=k, p=p, power_iter_k=power_iter)
else:
Q = randomized_range(A, rank, power_iter=power_iter, block_krylov=block_krylov, l=l)
if rank is None:
rank = Q.shape[1]
if use_id:
J, P = ID_row(Q, rank)
W, R = la.qr(A[J, :].T, mode='economic')
Q = P.dot(R.T)
U, sigma, VT = la.svd(Q, full_matrices=False)
V = VT.dot(W.T)
return U[:, :rank], sigma[:rank], V[:rank, :]
M = Q.T.dot(A)
U, sigma, VT = la.svd(M, full_matrices=False)
U = Q.dot(U)
return U[:, :rank], sigma[:rank], VT[:rank, :]
Developer: daeyun, Project: randomized_svd, Lines: 28, Source: randomized_svd.py
Example 5: _compute_subcorr
def _compute_subcorr(G, phi_sig):
"""Compute the subspace correlation."""
Ug, Sg, Vg = linalg.svd(G, full_matrices=False)
tmp = np.dot(Ug.T.conjugate(), phi_sig)
Uc, Sc, Vc = linalg.svd(tmp, full_matrices=False)
X = np.dot(np.dot(Vg.T, np.diag(1. / Sg)), Uc) # subcorr
return Sc[0], X[:, 0] / linalg.norm(X[:, 0])
Developer: hoechenberger, Project: mne-python, Lines: 7, Source: _rap_music.py
Example 6: compute_bench
def compute_bench(data_gen, samples_range, features_range, q=3):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_data(n_samples, n_features)
rank = min(n_samples, n_features) // 10 + 1
gc.collect()
print("benching scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print "benching scikit-learn fast_svd: q=0"
tstart = time()
fast_svd(X, rank, q=0)
results['scikit-learn fast_svd (q=0)'].append(time() - tstart)
gc.collect()
print "benching scikit-learn fast_svd: q=%d " % q
tstart = time()
fast_svd(X, rank, q=q)
results['scikit-learn fast_svd (q=%d)' % q].append(time() - tstart)
return results
Developer: AnneLaureF, Project: scikit-learn, Lines: 35, Source: plot_bench_svd.py
Example 7: setdata
def setdata(self, X, V):
A = self.bialtprodeye(2*self.F.J_coords)
"""Note: p, q <= min(n,m)"""
self.data.Brand = 2*(random((A.shape[0],self.data.p))-0.5)
self.data.Crand = 2*(random((A.shape[1],self.data.q))-0.5)
self.data.B = zeros((A.shape[0],self.data.p), Float)
self.data.C = zeros((A.shape[1],self.data.q), Float)
self.data.D = zeros((self.data.q,self.data.p), Float)
U, S, Vh = linalg.svd(A)
self.data.b = U[:,-1:]
self.data.c = num_transpose(Vh)[:,-1:]
if self.update:
self.data.B[:,1] = self.data.b
self.data.C[:,1] = self.data.c
U2, S2, Vh2 = linalg.svd(c_[r_[A, transpose(self.data.C[:,1])], r_[self.data.B[:,1], [[0]]]])
self.data.B[:,2] = U2[0:A.shape[0],-1:]
self.data.C[:,2] = num_transpose(Vh2)[0:A.shape[1],-1:]
self.data.D[0,1] = U2[A.shape[0],-1]
self.data.D[1,0] = num_transpose(Vh2)[A.shape[1],-1]
else:
self.data.B = self.data.Brand
self.data.C = self.data.Crand
Developer: BenjaminBerhault, Project: Python_Classes4MAD, Lines: 26, Source: TestFunc.py
Example 8: __init__
def __init__(self,data,cov_matrix=False,loc=None):
"""Parameters
----------
data : array of data, shape=(number points,number dim)
If cov_matrix is True then data is the covariance matrix (see below)
Keywords
--------
cov_matrix : bool (optional)
If True data is treated as a covariance matrix with shape=(number dim, number dim)
loc : the mean of the data if a covariance matrix is given, shape=(number dim)
"""
if cov_matrix:
self.dim=data.shape[0]
self.n=None
self.data_t=None
self.mu=loc
self.evec,eval,V=sl.svd(data,full_matrices=False)
self.sigma=sqrt(eval)
self.Sigma=diag(1./self.sigma)
self.B=dot(self.evec,self.Sigma)
self.Binv=sl.inv(self.B)
else:
self.n,self.dim=data.shape #the shape of input data
self.mu=data.mean(axis=0) #the mean of the data
self.data_t=data-self.mu #remove the mean
self.evec,eval,V=sl.svd(self.data_t.T,full_matrices=False) #get the eigenvectors (axes of the ellipsoid)
data_p=dot(self.data_t,self.evec) #project the data onto the eigenvectors
self.sigma=data_p.std(axis=0) #get the spread of the distribution (the axis ratios for the ellipsoid)
self.Sigma=diag(1./self.sigma) #the eigenvalue matrix for the ellipsoid equation
self.B=dot(self.evec,self.Sigma) #used in the ellipsoid equation
self.Binv=sl.inv(self.B) #also useful to have around
Developer: cmp346, Project: densityplot, Lines: 32, Source: error_ellipse.py
Example 9: rcca
def rcca(X, Y, reg):
X = np.array(X)
Y = np.array(Y)
n, p = X.shape
n, q = Y.shape
# zero mean
X = X - X.mean(axis=0)
Y = Y - Y.mean(axis=0)
# covariances
S = np.cov(X.T, Y.T, bias=1)
SXX = S[:p,:p]
SYY = S[p:,p:]
SXY = S[:p,p:]
ux, lx, vxt = SLA.svd(SXX)
#uy, ly, vyt = SLA.svd(SYY)
#print lx
# regularization
Rg = np.diag(np.ones(p)*reg)
SXX = SXX + Rg
SYY = SYY + Rg
sqx = SLA.sqrtm(SLA.inv(SXX)) # SXX^(-1/2)
sqy = SLA.sqrtm(SLA.inv(SYY)) # SYY^(-1/2)
M = np.dot(np.dot(sqx, SXY), sqy.T) # SXX^(-1/2) * SXY * SYY^(-T/2)
A, r, Bh = SLA.svd(M, full_matrices=False)
B = Bh.T
#r = self.reg*r
return r[0], lx[0], lx[0]
Developer: cvpapero16, Project: pose_cca, Lines: 33, Source: rcca_test.py
Example 10: compute_fundamental
def compute_fundamental(x1,x2):
""" 正規化8点法を使って対応点群(x1,x2:3*nの配列)
から基礎行列を計算する。各列は次のような並びである。
[x'*x, x'*y, x', y'*x, y'*y, y', x, y, 1] """
n = x1.shape[1]
if x2.shape[1] != n:
raise ValueError("Number of points don't match.")
# build the matrix of equations
A = zeros((n,9))
for i in range(n):
A[i] = [x1[0,i]*x2[0,i], x1[0,i]*x2[1,i], x1[0,i]*x2[2,i],
x1[1,i]*x2[0,i], x1[1,i]*x2[1,i], x1[1,i]*x2[2,i],
x1[2,i]*x2[0,i], x1[2,i]*x2[1,i], x1[2,i]*x2[2,i] ]
# compute the linear least-squares solution
U,S,V = linalg.svd(A)
F = V[-1].reshape(3,3)
# constrain F
# make it rank 2 by zeroing out the last singular value
U,S,V = linalg.svd(F)
S[2] = 0
F = dot(U,dot(diag(S),V))
return F
Developer: Hironsan, Project: ComputerVision, Lines: 27, Source: sfm.py
Example 11: dataNorm
def dataNorm(self):
SXX = np.cov(self.X)
U, l, Ut = LA.svd(SXX, full_matrices=True)
H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
self.nX = np.dot(H,self.X)
#print np.cov(self.nX)
#print "mean:"
#print np.mean(self.nX)
SYY = np.cov(self.Y)
U, l, Ut = LA.svd(SYY, full_matrices=True)
H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
#print "H"
#print H
self.nY = np.dot(H,self.Y)
#print np.cov(self.nY)
print "dataNorm_X:"
for i in range(len(self.nX)):
print(self.nX[i])
print("---")
print "dataNorm_Y:"
for i in range(len(self.nY)):
print(self.nY[i])
print("---")
Developer: cvpapero, Project: canotest, Lines: 27, Source: cca4.py
Example 12: geigen
def geigen(Amat, Bmat, Cmat):
"""
generalized eigenvalue problem of the form
max tr L'AM / sqrt(tr L'BL tr M'CM) w.r.t. L and M
:param Amat numpy ndarray of shape (M,N)
:param Bmat numpy ndarray of shape (M,M)
:param Cmat numpy ndarray of shape (N,N)
:rtype: numpy ndarray
:return values: eigenvalues
:return Lmat: left eigenvectors
:return Mmat: right eigenvectors
"""
if Bmat.shape[0] != Bmat.shape[1]:
print("BMAT is not square.\n")
sys.exit(1)
if Cmat.shape[0] != Cmat.shape[1]:
print("CMAT is not square.\n")
sys.exit(1)
p = Bmat.shape[0]
q = Cmat.shape[0]
s = min(p, q)
tmp = fabs(Bmat - Bmat.transpose())
tmp1 = fabs(Bmat)
if tmp.max() / tmp1.max() > 1e-10:
print("BMAT not symmetric..\n")
sys.exit(1)
tmp = fabs(Cmat - Cmat.transpose())
tmp1 = fabs(Cmat)
if tmp.max() / tmp1.max() > 1e-10:
print("CMAT not symmetric..\n")
sys.exit(1)
Bmat = (Bmat + Bmat.transpose()) / 2.
Cmat = (Cmat + Cmat.transpose()) / 2.
Bfac = cholesky(Bmat)
Cfac = cholesky(Cmat)
Bfacinv = inv(Bfac)
Bfacinvt = Bfacinv.transpose()
Cfacinv = inv(Cfac)
Dmat = Bfacinvt.dot(Amat).dot(Cfacinv)
if p >= q:
u, d, v = svd(Dmat)
values = d
Lmat = Bfacinv.dot(u)
Mmat = Cfacinv.dot(v.transpose())
else:
u, d, v = svd(Dmat.transpose())
values = d
Lmat = Bfacinv.dot(u)
Mmat = Cfacinv.dot(v.transpose())
return values, Lmat, Mmat
Developer: glemaitre, Project: fdasrsf, Lines: 60, Source: utility_functions.py
Example 13: addblock_svd_update
def addblock_svd_update( Uarg, Sarg, Varg, Aarg, force_orth = False):
U = Varg
V = Uarg
S = np.eye(len(Sarg),len(Sarg))*Sarg
A = Aarg.T
current_rank = U.shape[1]
m = np.dot(U.T,A)
p = A - np.dot(U,m)
P = lin.orth(p)
Ra = np.dot(P.T,p)
z = np.zeros(m.shape)
K = np.vstack(( np.hstack((S,m)), np.hstack((z.T,Ra)) ))
tUp,tSp,tVp = lin.svd(K)
tUp = tUp[:,:current_rank]
tSp = np.diag(tSp[:current_rank])
tVp = tVp[:,:current_rank]
Sp = tSp
Up = np.dot(np.hstack((U,P)),tUp)
Vp = np.dot(V,tVp[:current_rank,:])
Vp = np.vstack((Vp, tVp[current_rank:tVp.shape[0], :]))
if force_orth:
UQ,UR = lin.qr(Up,mode='economic')
VQ,VR = lin.qr(Vp,mode='economic')
tUp,tSp,tVp = lin.svd( np.dot(np.dot(UR,Sp),VR.T))
tSp = np.diag(tSp)
Up = np.dot(UQ,tUp)
Vp = np.dot(VQ,tVp)
Sp = tSp
Up1 = Vp
Vp1 = Up
return Up1,Sp,Vp1
Developer: burakbayramli, Project: classnotes, Lines: 35, Source: isvd.py
Example 14: compute_fundamental
def compute_fundamental(x1,x2):
""" Computes the fundamental matrix from corresponding points
(x1,x2 3*n arrays) using the 8 point algorithm.
Each row in the A matrix below is constructed as
[x'*x, x'*y, x', y'*x, y'*y, y', x, y, 1] """
n = x1.shape[1]
if x2.shape[1] != n:
raise ValueError("Number of points don't match.")
# build matrix for equations
A = zeros((n,9))
for i in range(n):
A[i] = [x1[0,i]*x2[0,i], x1[0,i]*x2[1,i], x1[0,i]*x2[2,i],
x1[1,i]*x2[0,i], x1[1,i]*x2[1,i], x1[1,i]*x2[2,i],
x1[2,i]*x2[0,i], x1[2,i]*x2[1,i], x1[2,i]*x2[2,i] ]
# compute linear least square solution
U,S,V = linalg.svd(A)
F = V[-1].reshape(3,3)
# constrain F
# make rank 2 by zeroing out last singular value
U,S,V = linalg.svd(F)
S[2] = 0
F = dot(U,dot(diag(S),V))
return F/F[2,2]
Developer: iCamera, Project: OfisEye, Lines: 28, Source: sfm.py
Example 15: rpca
def rpca(M, eps=0.001, r=1):
'''
An implementation of the robust pca algorithm as
outlined in [need reference]
'''
assert(len(M.shape) == 2)
m,n = M.shape
s = linalg.svd(M, compute_uv=False)
B = 1 / np.sqrt(n) # threshold parameter
thresh = B * s[0]
# Initial Low Rank Component
L = np.zeros(M.shape)
# Initial Sparse Component
S = HT(M - L, thresh)
iterations = range(int(10 * np.log(n * B * frob_norm(M - S) / eps)))
logger.info('Number of iterations: %d to achieve eps = %f' % (len(iterations), eps))
for k in range(1, r+1):
for t in iterations:
U,s,Vt = linalg.svd(M - S, full_matrices=False)
thresh = B * ( s[k] + s[k-1] * (1/2)**t )
# Best rank k approximation of M - S
L = np.dot(np.dot(U[:,:k], np.diag(s[:k])), Vt[:k])
S = HT(M - L, thresh)
if (B * s[k]) < (eps / (2*n)):
break
return L,S
Developer: afraenkel, Project: rpyca, Lines: 35, Source: rpca.py
Example 16: taskFive
def taskFive(n,R,G,B,w,h):
Ur, Sr, VrT = svd(R.T, full_matrices=False)
Ug, Sg, VgT = svd(G.T, full_matrices=False)
Ub, Sb, VbT = svd(B.T, full_matrices=False)
Ur = Ur[:,0:n]
Sr = Sr[0:n]
VrT = VrT[0:n,:]
Ug = Ug[:,0:n]
Sg = Sg[0:n]
VgT = VgT[0:n,:]
Ub = Ub[:,0:n]
Sb = Sb[0:n]
VbT = VbT[0:n,:]
newim = Image.new('RGB',(w,h))
R = numpy.dot(numpy.dot(Ur,numpy.diag(Sr)),VrT)
R = numpy.array(R.T.flatten(),dtype=int)
G = numpy.dot(numpy.dot(Ug,numpy.diag(Sg)),VgT)
G = numpy.array(G.T.flatten(),dtype=int)
B = numpy.dot(numpy.dot(Ub,numpy.diag(Sb)),VbT)
B = numpy.array(B.T.flatten(),dtype=int)
t = list(zip(R, G, B))  # materialise the pixel tuples for putdata
newim.putdata(t)
# newim.show()
newim.save("large"+str(n)+".tif")
Developer: chaoxia1, Project: EIA, Lines: 25, Source: imsvd.py
Example 17: fit
def fit(self, X, **params):
"""Fit the model to the data X"""
self._set_params(**params)
X = np.atleast_2d(X)
n_samples = X.shape[0]
if self.copy:
X = X.copy()
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.do_fast_svd:
if self.n_comp == "mle" or self.n_comp is None:
warnings.warn('All components are to be computed. '
'Not using fast truncated SVD')
U, S, V = linalg.svd(X, full_matrices=False)
else:
U, S, V = fast_svd(X, self.n_comp, q=self.iterated_power)
else:
U, S, V = linalg.svd(X, full_matrices=False)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = self.explained_variance_ / \
self.explained_variance_.sum()
self.components_ = V.T
if self.n_comp == 'mle':
self.n_comp = _infer_dimension_(self.explained_variance_,
n_samples, X.shape[1])
if self.n_comp is not None:
self.components_ = self.components_[:, :self.n_comp]
self.explained_variance_ = self.explained_variance_[:self.n_comp]
return self
Developer: omitevski, Project: scikit-learn, Lines: 32, Source: pca.py
Example 18: hosvd2
def hosvd2(T, saveSpace=False):
"""HOSVD N-mode SVD decomposition of N-way tensor
(Z, Un, Sn, Vn) = HOSVD(T) decomposes the N-way tensor D into N
orthogonal matrices stored in Un and a core tensor Z. Also
returns the n Tucker1 results in Sn and Vn if saveSpace=False (default)
Author: Larsson Omberg <[email protected]>
Date: 11-NOV-2007, 28-February-2008, 5-June-2009, 4-Nov-2010"""
#Find orthogonal matrices
Un=[]; Vn=[]; Sn=[]
for n in range(T.ndim):
Tn = flatten(T, n+1)  # FIX
if Tn.shape[1] < Tn.shape[0]:
[U,S,V] = svd(Tn,0);
V=V.T
else:
[V,S,U] = svd(Tn.T,0);
U=U.T
Un.append(U);
if not saveSpace:
Vn.append(V);
Sn.append(S);
Z=T.copy()
for n in range(len(Un)):
Z = nmodmult(Z, Un[n].T, n+1)
new_T = Z.copy()
for i in range(len(Un)):
new_T = nmodmult(new_T, Un[i], i+1)  # rebuild T from the core and the mode matrices
if not saveSpace:
return [new_T, Z, Un, Sn, Vn]
return [Z, Un]
Developer: pengyuan, Project: markov2tensor, Lines: 33, Source: tensor_old.py
Example 19: _solve_svd
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
Developer: ElDeveloper, Project: scikit-learn, Lines: 60, Source: discriminant_analysis.py
Example 20: _tucker3
def _tucker3(X, n_components, tol, max_iter, init_type, random_state=None):
"""
3 dimensional Tucker decomposition.
This code is meant to be a tutorial/testing example... in general _tuckerN
should be more compact and equivalent mathematically.
"""
if len(X.shape) != 3:
raise ValueError("Tucker3 decomposition only supports 3 dimensions!")
if init_type == "random":
A, B, C = _random_init(X, n_components, random_state)
elif init_type == "hosvd":
A, B, C = _hosvd_init(X, n_components)
err = 1E10
X_sq = np.sum(X ** 2)
for itr in range(max_iter):
err_old = err
U, S, V = linalg.svd(matricize(X, 0).dot(np.kron(C, B)),
full_matrices=False)
A = U[:, :n_components]
U, S, V = linalg.svd(matricize(X, 1).dot(np.kron(C, A)),
full_matrices=False)
B = U[:, :n_components]
U, S, V = linalg.svd(matricize(X, 2).dot(np.kron(B, A)),
full_matrices=False)
C = U[:, :n_components]
G = tmult(tmult(tmult(X, A.T, 0), B.T, 1), C.T, 2)
err = np.sum(G ** 2) - X_sq
thresh = np.abs(err - err_old) / err_old
if thresh < tol:
break
return G, A, B, C
Developer: pengyuan, Project: markov2tensor, Lines: 35, Source: decomposition.py
Note: The scipy.linalg.svd examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this compilation without permission.