This article collects typical usage examples of the scipy.dot function in Python. If you have been wondering what exactly scipy.dot does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
A total of 20 code examples of the dot function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
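Before the examples, a minimal standalone sketch of what scipy.dot itself does: it is simply an alias of numpy.dot, computing inner products and matrix products. Newer SciPy releases deprecate (and eventually drop) this alias, so numpy.dot or the @ operator is the drop-in replacement; the snippet below guards against that.

import numpy as np
import scipy as sp

A = np.array([[1., 2.], [3., 4.]])
B = np.array([[5., 6.], [7., 8.]])

# scipy.dot mirrors numpy.dot; fall back to numpy.dot on SciPy versions that dropped the alias
C = sp.dot(A, B) if hasattr(sp, 'dot') else np.dot(A, B)
assert np.allclose(C, np.dot(A, B))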
Example 1: dot_fromfeatures
def dot_fromfeatures(features1,
                     features2 = None):

    if features2 is None:
        features2 = features1

    npoints1 = features1.shape[0]
    npoints2 = features2.shape[0]

    features1.shape = npoints1, -1
    features2.shape = npoints2, -1

    ndims = features1.shape[1]
    assert(features2.shape[1] == ndims)

    if ndims < DOT_MAX_NDIMS:
        out = sp.dot(features1, features2.T)
    else:
        out = sp.dot(features1[:,:DOT_MAX_NDIMS],
                     features2[:,:DOT_MAX_NDIMS].T)
        ndims_done = DOT_MAX_NDIMS
        while ndims_done < ndims:
            out += sp.dot(features1[:,ndims_done:ndims_done+DOT_MAX_NDIMS],
                          features2[:,ndims_done:ndims_done+DOT_MAX_NDIMS].T)
            ndims_done += DOT_MAX_NDIMS

    return out
Developer: jaberg, Project: sclas, Lines: 27, Source: kernel_generate_fromcsv.py
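A hypothetical usage sketch of dot_fromfeatures: the function computes the linear-kernel (Gram) matrix features1 · features2ᵀ, splitting the feature dimension into chunks to bound memory use. DOT_MAX_NDIMS is a module-level constant in the original project; the value 256 below is only an assumption for illustration, and sp is assumed to be the scipy module, as in the source file.

import numpy as np
import scipy as sp

DOT_MAX_NDIMS = 256                                        # assumed chunk size, not the project's value

features = np.random.rand(10, 3, 100).astype('float32')    # 10 points, flattened to 300 dims
K = dot_fromfeatures(features)                              # 300 > DOT_MAX_NDIMS, so the chunked path runs
assert K.shape == (10, 10)                                  # 10 x 10 linear kernel (Gram) matrix
assert np.allclose(K, K.T, atol=1e-4)                       # a Gram matrix is symmetric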
Example 2: calcInvFisher
def calcInvFisher(sigma, invSigma=None, factorSigma=None):
    """ Efficiently compute the exact inverse of the FIM of a Gaussian.
    Returns a list of the diagonal blocks. """
    if invSigma == None:
        invSigma = inv(sigma)
    if factorSigma == None:
        factorSigma = cholesky(sigma)
    dim = sigma.shape[0]

    invF = [mat(1 / (invSigma[-1, -1] + factorSigma[-1, -1] ** -2))]
    invD = 1 / invSigma[-1, -1]
    for k in reversed(list(range(dim - 1))):
        v = invSigma[k + 1:, k]
        w = invSigma[k, k]
        wr = w + factorSigma[k, k] ** -2
        u = dot(invD, v)
        s = dot(v, u)
        q = 1 / (w - s)
        qr = 1 / (wr - s)
        t = -(1 + q * s) / w
        tr = -(1 + qr * s) / wr
        invF.append(blockCombine([[qr, tr * u], [mat(tr * u).T, invD + qr * outer(u, u)]]))
        invD = blockCombine([[q , t * u], [mat(t * u).T, invD + q * outer(u, u)]])

    invF.append(sigma)
    invF.reverse()
    return invF
Developer: Angeliqe, Project: pybrain, Lines: 27, Source: fisher.py
Example 3: fgmres
def fgmres(self,rhs,tol=1e-6,restrt=None,maxiter=None,callback=None):
    if maxiter == None:
        maxiter = len(rhs)
    if restrt == None:
        restrt = 2*maxiter
    # implemented as in [Saad, 1993]
    # start
    x = zeros(len(rhs))
    H = zeros((restrt+1, restrt))
    V = zeros((len(rhs),restrt))
    Z = zeros((len(rhs),restrt))
    # Arnoldi process (with modified Gramm-Schmidt)
    res = 1.
    j = 0
    r = rhs - self.point.matvec(x)
    beta = norm(r)
    V[:,0]=r/beta
    while j < maxiter and res > tol:
        Z[:,j] = self.point.psolve(V[:,j])
        w = self.point.matvec(Z[:,j])
        for i in range(j+1):
            H[i,j]=dot(w,V[:,i])
            w = w - H[i,j]*V[:,i]
        H[j+1,j] = norm(w)
        V[:,j+1]=w/H[j+1,j]
        e = zeros(j+2)
        e[0]=1.
        y, res, rank, sing_val = lstsq(H[:j+2,:j+1],beta*e)
        j += 1
        print "# GMRES| iteration :", j, "res: ", res/beta
        self.resid = r_[self.resid,res/beta]
    Zy = dot(Z[:,:j],y)
    x = x + Zy
    info = 1
    return (x,info)
Developer: pvnuffel, Project: fokkerplanck, Lines: 35, Source: GMRESLinearSolver.py
Example 4: Areml_K_grad_i
def Areml_K_grad_i(self,i):
    i = self.covar._actindex2index(i)
    R = sp.dot(self.WcCtildeLcA_o_WrRF(i).T, self.dWLW())
    R+= R.T
    R+= -self.ALcCtildeLcA_o_FRF(i)
    R+= -sp.dot(self.dWLW().T, self.Cbar_o_Sr_dWLW(i))
    return R
Developer: mennowitteveen, Project: limix, Lines: 7, Source: gp2kronSumLR.py
Example 5: rlsloo_ll1
def rlsloo_ll1( V, D, Y, lambd):
    """
    Computes cs and the actual LOO errors for a single value of lambda. (lambd)
    """
    n = V.shape[0]
    cl = Y.shape[1]

    inner = 1/(D + lambd)
    inner = inner.conj()
    VtY = sp.dot(V.T, Y)
    VtY = VtY.conj()
    # Because of signs of D are flipped (scipy.linalg.eig returns
    # flipped signs for complex part of the eigenvalues)
    in_dot = sp.ones((n,1)) * inner
    ViD = V * in_dot
    cs = sp.dot(ViD, VtY)
    dGi = sp.sum(ViD*V, axis = 1)
    # -- till here works fine
    # check matrix dimensions
    looerrs = cs.ravel()/sp.real(dGi.ravel())
    looerrs = sp.real(looerrs)
    cs = sp.real(cs.transpose())

    return cs.ravel(), looerrs
Developer: abhijitbendale, Project: rls-lab, Lines: 25, Source: non_linear_rls.py
Example 6: _backwardImplementation
def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
    if self.onesigma:
        # algorithm for one global sigma for all mu's
        expln_params = expln(self.params)
        sumxsquared = dot(self.state, self.state)
        self._derivs += (
            sum((outbuf - inbuf) ** 2 - expln_params ** 2 * sumxsquared) / expln_params * explnPrime(self.params)
        )
        inerr[:] = outbuf - inbuf

        if not self.autoalpha and sumxsquared != 0:
            inerr /= expln_params ** 2 * sumxsquared
            self._derivs /= expln_params ** 2 * sumxsquared
    else:
        # Algorithm for seperate sigma for each mu
        expln_params = expln(self.params).reshape(len(outbuf), len(self.state))
        explnPrime_params = explnPrime(self.params).reshape(len(outbuf), len(self.state))

        idx = 0
        for j in xrange(len(outbuf)):
            sigma_subst2 = dot(self.state ** 2, expln_params[j, :] ** 2)
            for i in xrange(len(self.state)):
                self._derivs[idx] = (
                    ((outbuf[j] - inbuf[j]) ** 2 - sigma_subst2)
                    / sigma_subst2
                    * self.state[i] ** 2
                    * expln_params[j, i]
                    * explnPrime_params[j, i]
                )
                if self.autoalpha and sigma_subst2 != 0:
                    self._derivs[idx] /= sigma_subst2
                idx += 1
            inerr[j] = outbuf[j] - inbuf[j]

            if not self.autoalpha and sigma_subst2 != 0:
                inerr[j] /= sigma_subst2
Developer: avain, Project: pybrain, Lines: 35, Source: statedependentlayer.py
Example 7: _LML_covar
def _LML_covar(self, hyperparams):
    """
    log marginal likelihood contributions from covariance hyperparameters
    """
    try:
        KV = self.get_covariances(hyperparams)
    except linalg.LinAlgError:
        LG.error("exception caught (%s)" % (str(hyperparams)))
        return 1E6

    #all in one go
    #negative log marginal likelihood, see derivations
    lquad = 0.5* (KV['y_rot']*KV['Si']*KV['y_rot']).sum()
    ldet = 0.5*-SP.log(KV['Si'][:,:]).sum()
    LML = 0.5*self.n*self.d * SP.log(2*SP.pi) + lquad + ldet

    if VERBOSE:
        #1. slow and explicit way
        lmls_ = SP.zeros([self.d])
        for i in xrange(self.d):
            _y = self.y[:,i]
            sigma2 = SP.exp(2*hyperparams['lik'])
            _K = KV['K'] + SP.diag(KV['Knoise'][:,i])
            _Ki = SP.linalg.inv(_K)
            lquad_ = 0.5 * SP.dot(_y,SP.dot(_Ki,_y))
            ldet_ = 0.5 * SP.log(SP.linalg.det(_K))
            lmls_[i] = 0.5 * self.n* SP.log(2*SP.pi) + lquad_ + ldet_
        assert SP.absolute(lmls_.sum()-LML)<1E-3, 'outch'

    return LML
Developer: AngelBerihuete, Project: pygp, Lines: 30, Source: gplvm_ard.py
Example 8: fastsvd
def fastsvd(M):
    """ Fast Singular Value Decomposition

    Inputs:
      M -- 2d numpy array

    Outputs:
      U,S,V -- see scipy.linalg.svd
    """
    h, w = M.shape

    # -- thin matrix
    if h >= w:
        # subspace of M'M
        U, S, V = N.linalg.svd(N.dot(M.T, M))
        U = N.dot(M, V.T)
        # normalize
        for i in xrange(w):
            S[i] = fastnorm(U[:,i])
            U[:,i] = U[:,i] / S[i]
    # -- fat matrix
    else:
        # subspace of MM'
        U, S, V = N.linalg.svd(N.dot(M, M.T))
        V = N.dot(U.T, M)
        # normalize
        for i in xrange(h):
            S[i] = fastnorm(V[i])
            V[i,:] = V[i] / S[i]

    return U, S, V
Developer: npinto, Project: v1s-0.0.4_scene, Lines: 34, Source: v1s_math.py
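A standalone recreation (not from the project) of the "thin matrix" trick used above: for h >= w, the SVD of the small w-by-w matrix MᵀM yields the right singular vectors, from which U and the singular values are recovered by normalizing column norms.

import numpy as np

M = np.random.rand(50, 8)                    # thin matrix: h >= w
_, _, V = np.linalg.svd(np.dot(M.T, M))      # right singular vectors of M from the small matrix
U = np.dot(M, V.T)                           # unnormalized left singular vectors
S = np.sqrt((U * U).sum(axis=0))             # column norms recover the singular values
U = U / S

assert np.allclose(S, np.linalg.svd(M, compute_uv=False))
assert np.allclose(np.dot(U * S, V), M)      # U . diag(S) . V reconstructs M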
Example 9: lsaTransform
def lsaTransform(self,dimensions=1):
    """ Calculate SVD of objects matrix: U . SIGMA . VT = MATRIX
    Reduce the dimension of sigma by specified factor producing sigma'.
    Then dot product the matrices:  U . SIGMA' . VT = MATRIX'
    """
    rows,cols = self.matrix.shape

    if dimensions <= rows: #Its a valid reduction

        #Sigma comes out as a list rather than a matrix
        u,sigma,vt = linalg.svd(self.matrix)

        #Dimension reduction, build SIGMA'
        for index in xrange(rows-dimensions, rows):
            sigma[index]=0

        #print linalg.diagsvd(sigma,len(self.matrix), len(vt))

        #Reconstruct MATRIX'
        reconstructedMatrix= dot(dot(u,linalg.diagsvd(sigma,len(self.matrix),len(vt))),vt)

        #Save transform
        self.matrix=reconstructedMatrix
    else:
        print "dimension reduction cannot be greater than %s" % rows
Developer: baokhanh2710, Project: news-reader, Lines: 26, Source: lsa.py
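The same rank-reduction step in isolation, as a standalone sketch (not taken from the project, and simplified to keep the k largest singular values): zero out the tail of sigma and rebuild the matrix, which yields the best rank-k approximation in the least-squares sense.

import numpy as np
from scipy import linalg

A = np.random.rand(6, 4)
k = 2                                               # number of singular values to keep
u, sigma, vt = linalg.svd(A)                        # sigma comes back as a 1-D array
sigma[k:] = 0                                       # build SIGMA' by zeroing the tail
A_k = np.dot(np.dot(u, linalg.diagsvd(sigma, A.shape[0], vt.shape[0])), vt)
assert np.linalg.matrix_rank(A_k) == k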
Example 10: get_stderr_fit
def get_stderr_fit(f,Xdata,popt,pcov):
    Y=f(Xdata,popt)
    listdY=[]
    for i in xrange(len(popt)):
        p=popt[i]
        dp=abs(p)/1e6+1e-20
        popt[i]+=dp
        Yi=f(Xdata,popt)
        dY=(Yi-Y)/dp
        listdY.append(dY)
        popt[i]-=dp
    listdY=scipy.array(listdY)
    #listdY is an array with N rows and M columns, N=len(popt), M=len(xdata[0])
    #pcov is an array with N rows and N columns
    left=scipy.dot(listdY.T,pcov)
    #left is an array of M rows and N columns
    right=scipy.dot(left,listdY)
    #right is an array of M rows and M columns
    sigma2y=right.diagonal()
    #sigma2y is standard error of fit and function of X
    mean_sigma2y=scipy.mean(right.diagonal())
    M=Xdata.shape[1];print M
    N=len(popt);print N
    avg_stddev_data=scipy.sqrt(M*mean_sigma2y/N)
    #this is because if exp error is constant at sig_dat, then mean_sigma2y=N/M*sig_dat**2
    sigmay=scipy.sqrt(sigma2y)
    return sigmay,avg_stddev_data
Developer: OmkarMehta, Project: Endsem_final, Lines: 27, Source: errorestimation_tutorial.py
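A hypothetical end-to-end use of get_stderr_fit with scipy.optimize.curve_fit; the exponential model and synthetic data below are invented for illustration. Note the calling convention assumed by the function: f(Xdata, popt) takes the whole parameter vector, and Xdata is shaped (n_variables, n_points) so that Xdata.shape[1] is the number of observations.

import numpy as np
from scipy.optimize import curve_fit

def f(Xdata, popt):
    a, b = popt
    return a * np.exp(-b * Xdata[0])

xdata = np.linspace(0., 4., 50)
Xdata = xdata.reshape(1, -1)                        # observations along the second axis
ydata = 2.5 * np.exp(-1.3 * xdata) + 0.05 * np.random.randn(50)

# curve_fit wants f(x, a, b), so wrap the vector-parameter model
popt, pcov = curve_fit(lambda x, a, b: f(x.reshape(1, -1), (a, b)),
                       xdata, ydata, p0=[1., 1.])
sigmay, avg_stddev_data = get_stderr_fit(f, Xdata, list(popt), pcov)
# sigmay holds one standard error of the fitted curve per data point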
Example 11: _learnStep
def _learnStep(self):
    """ Main part of the algorithm. """
    I = eye(self.numParameters)
    self._produceSamples()
    utilities = self.shapingFunction(self._currentEvaluations)
    utilities /= sum(utilities)  # make the utilities sum to 1
    if self.uniformBaseline:
        utilities -= 1./self.batchSize
    samples = array(map(self._base2sample, self._population))

    dCenter = dot(samples.T, utilities)
    covGradient = dot(array([outer(s,s) - I for s in samples]).T, utilities)
    covTrace = trace(covGradient)
    covGradient -= covTrace/self.numParameters * I
    dA = 0.5 * (self.scaleLearningRate * covTrace/self.numParameters * I
                +self.covLearningRate * covGradient)

    self._lastLogDetA = self._logDetA
    self._lastInvA = self._invA

    self._center += self.centerLearningRate * dot(self._A, dCenter)
    self._A = dot(self._A, expm2(dA))
    self._invA = dot(expm2(-dA), self._invA)
    self._logDetA += 0.5 * self.scaleLearningRate * covTrace
    if self.storeAllDistributions:
        self._allDistributions.append((self._center.copy(), self._A.copy()))
Developer: DanSGraham, Project: School-Projects, Lines: 26, Source: xnes.py
Example 12: basex_core_transform
def basex_core_transform(rawdata, M_vert, M_horz, Mc_vert,
                         Mc_horz, vert_left, horz_right, dr=1.0):
    """
    This is the internal function
    that does the actual BASEX transform. It requires
    that the matrices of basis set coefficients be passed.

    Parameters
    ----------
    rawdata : NxM numpy array
        the raw image.
    M_vert_etc. : Numpy arrays
        2D arrays given by the basis set calculation function
    dr : float
        pixel size. This only affects the absolute scaling of the output.

    Returns
    -------
    IM : NxM numpy array
        The abel-transformed image, a slice of the 3D distribution
    """
    # Reconstructing image - This is where the magic happens
    Ci = scipy.dot(scipy.dot(vert_left, rawdata), horz_right)  # previously: vert_left.dot(rawdata).dot(horz_right)

    # use an heuristic scaling factor to match the analytical abel transform
    # For more info see https://github.com/PyAbel/PyAbel/issues/4
    MAGIC_NUMBER = 1.1122244156826457
    Ci *= MAGIC_NUMBER/dr

    IM = scipy.dot(scipy.dot(Mc_vert, Ci), Mc_horz.T)  # Previously: Mc_vert.dot(Ci).dot(Mc_horz.T)
    # P = dot(dot(Mc,Ci),M.T) # This calculates the projection,
    # which should recreate the original image

    return IM
Developer: PhantomYuan, Project: PyAbel, Lines: 35, Source: basex.py
Example 13: dw
def dw(self):
    """Calculates the Durbin-Watson statistic
    """
    de = diff(self.e,1)
    dw = dot(de,de) / dot(self.e,self.e)
    return dw
Developer: strategist922, Project: qikify, Lines: 7, Source: OLS.py
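The Durbin-Watson statistic written out on its own (a standalone sketch with synthetic residuals): d = sum((e_t - e_{t-1})^2) / sum(e_t^2), which is close to 2 for uncorrelated residuals and approaches 0 under strong positive autocorrelation.

import numpy as np

e = np.random.randn(1000)               # uncorrelated residuals -> statistic near 2
de = np.diff(e, 1)
d = np.dot(de, de) / np.dot(e, e)
print(d)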
Example 14: fit
def fit(self, X, y, useQR = True, addConstant = True):
    '''Solve y = Xb.

    Parameters
    ----------
    x : array, shape (M, N)
    y : array, shape (M,)
    useQR : boolean
        Whether or not to use QR decomposition to fit regression line.
    addConstant: boolean
        Whether or not to add a constant column to X
    '''
    if y.shape[0] != X.shape[0]:
        raise ValueError('incompatible dimensions')

    if addConstant:
        self.X = c_[ones(X.shape[0]), X]
    self.y = y
    self.X_columns = getattr(X,'columns', None)
    self.y_columns = getattr(y,'columns', None)

    if useQR:
        # TODO: Ehh, this is broken. Need to fix.
        Q,R = scipy.linalg.qr(self.X)
        Qty = dot(Q.T, y)
        self.b = scipy.linalg.solve(R,Qty)
    else:
        self.inv_xx = inv(dot(self.X.T,self.X))
        xy = dot(self.X.T,self.y)
        self.b = dot(self.inv_xx,xy)

    self.computeStatistics()
Developer: strategist922, Project: qikify, Lines: 32, Source: OLS.py
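The source's own TODO notes that the QR branch is broken: by default scipy.linalg.qr returns the full decomposition, so R is rectangular and scipy.linalg.solve cannot be applied. A standalone sketch of one standard fix (synthetic data, not the project's code) uses the economic QR so that R is square and upper triangular.

import numpy as np
from scipy import linalg

X = np.c_[np.ones(100), np.random.randn(100, 3)]       # design matrix with a constant column
beta_true = np.array([1.0, 2.0, -0.5, 0.3])
y = np.dot(X, beta_true) + 0.01 * np.random.randn(100)

Q, R = linalg.qr(X, mode='economic')                    # Q: 100x4, R: 4x4 upper triangular
b = linalg.solve_triangular(R, np.dot(Q.T, y))
assert np.allclose(b, np.linalg.lstsq(X, y, rcond=None)[0])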
Example 15: ar_fit
def ar_fit(p_data, p_or_plist=range(100), selector='sbc'):
    """fits a (multivariate) AR (_A_uto_R_egrssive) model to data

    :Parameters:
        p_data : ndarray
            Data with observations on the rows and variables on the columns
        p_or_plist : list
            List of model orders to select from. This list has to be continuous
            with a step size of 1, e.g. [10,11,12,13,14]
        selector : str
            One of 'sbc' for the Schwarz Bayesian Criterion or 'fpe' for the
            log of Akaike's Final Prediction Error. This determines what metric
            is used to evaluate the best model order.
    """
    # checks and inits
    if not isinstance(p_data, N.ndarray):
        raise ValueError('p_data is not an ndarray')
    data = p_data.copy()
    n, m = data.shape
    if selector not in ['sbc', 'fpe']:
        raise ValueError('selector has to be one of: "sbc" or "fpe"!')
    if not isinstance(p_or_plist, list):
        p_or_plist = [p_or_plist]
    p_max = max(p_or_plist)
    ne = n - p_max
    npmax = m * p_max
    if ne <= npmax:
        raise ValueError('time series to short!')
    R = _ar_model_qr(data, p_max)

    # model order selection
    if len(p_or_plist) > 1:
        sbc, fpe, ldp, np = _ar_model_select(R, m, ne, p_or_plist)
        if selector == 'sbc':
            crit = sbc
        elif selector == 'fpe':
            crit = fpe
    else:
        crit = N.zeros(1)
    p_opt = crit.argmin()
    np = m * p_opt

    # get lower right triangle of R
    #
    #     | R11  R12 |
    # R = |          |
    #     |  0   R22 |
    #
    R11 = R[:np, :np]
    R12 = R[:np, npmax:]
    R22 = R[np:, npmax:]

    # build the model
    A = N.dot(NL.inv(R11), R12).T
    C = N.dot(R22.T, R22) / (ne - np)

    # return
    del R, R11, R12, R22
    return A, C, crit
Developer: mtambos, Project: Neural-Simulation, Lines: 60, Source: ar_model.py
Example 16: dlsim
def dlsim( self, u, x0 = None, Tl = 0, Ts = 0.001 ):
    """
    @summary: Simulate the motor for one input
    @param u: The control signal
    @param Ts: Sampling time (0.001 by default)
    @param x0: The initial conditions on the state vector (zero by default).
    @return: The system response
    """
    if not x0 is None:
        self.x0 = x0

    if self.x0 is None:
        self.x0 = zeros( ( 5, 1 ) )
        self.x0[4, 0] = Tl

    ( self.Ad, self.Bd, self.Cd, self.Dd ) = self.dss( self.x0, Ts )

    self.x0 = dot( self.Ad, self.x0 ) + dot( self.Bd, u )
    y_out = dot( self.Cd, self.x0 ) # + dot( self.Dd, u )

    return ( y_out, self.x0 )
Developer: miroslavbucek, Project: ConPy, Lines: 25, Source: pmsm.py
Example 17: GP_sample_posterior
def GP_sample_posterior(covar,X,logtheta,x,y,ns=1):
    """
    Sample from the posterior distribution of a GP

    x : [double]
        training inputs
    y : [double]
        training targets
    other :
        See :py:func:`gp_sample.GP_sample_prior`
    """
    KXx = covar.K(logtheta,x,X)
    KXX = covar.K(logtheta,X)
    Kxx = covar.K(logtheta,x)
    iKxx = SP.linalg.inv(Kxx+eye(Kxx.shape[0])*0.01)
    mu = SP.dot(KXx.T,SP.dot(iKxx,y)).reshape([-1,1])
    cov = KXX - SP.dot(KXx.T,SP.dot(iKxx,KXx))
    L = SP.linalg.cholesky(cov).T
    Y = mu + SP.dot(L,random.randn(X.shape[0],ns))
    return Y
Developer: AngelBerihuete, Project: pygp, Lines: 25, Source: gp_sample.py
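A standalone sketch of the same posterior-sampling recipe with an explicit RBF kernel instead of the pygp covariance object; the kernel, data, and jitter value are all invented for illustration.

import numpy as np

def rbf(a, b, ell=1.0):
    d2 = (a.reshape(-1, 1) - b.reshape(1, -1)) ** 2
    return np.exp(-0.5 * d2 / ell ** 2)

x = np.array([-2.0, 0.0, 1.5])            # training inputs
y = np.sin(x)                             # training targets
X = np.linspace(-3, 3, 50)                # test inputs
ns = 5                                    # number of posterior samples

KXx = rbf(x, X)                           # cross-covariance, shape (3, 50)
KXX = rbf(X, X)
Kxx = rbf(x, x)
iKxx = np.linalg.inv(Kxx + 0.01 * np.eye(len(x)))
mu = np.dot(KXx.T, np.dot(iKxx, y)).reshape(-1, 1)
cov = KXX - np.dot(KXx.T, np.dot(iKxx, KXx))
L = np.linalg.cholesky(cov + 1e-8 * np.eye(len(X)))   # small jitter for numerical stability
Y = mu + np.dot(L, np.random.randn(len(X), ns))        # shape (50, ns): posterior draws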
Example 18: estimate
def estimate(self):
    # estimating coefficients, and basic stats
    self.inv_xx = inv(dot(self.x.T,self.x))
    xy = dot(self.x.T,self.y)
    self.betas = dot(self.inv_xx,xy)                    # estimate coefficients
    self.nobs = self.y.shape[0]                         # number of observations
    self.ncoef = self.x.shape[1]                        # number of coef.
    self.df_e = self.nobs - self.ncoef                  # degrees of freedom, error
    self.df_r = self.ncoef - 1                          # degrees of freedom, regression

    self.e = self.y - dot(self.x,self.betas)            # residuals
    self.sse = dot(self.e,self.e)/self.df_e             # SSE
    self.se = sqrt(diagonal(self.sse*self.inv_xx))      # coef. standard errors
    self.t = self.betas / self.se                       # coef. t-statistics
    self.p = (1-stats.t.cdf(abs(self.t), self.df_e)) * 2    # coef. p-values

    self.R2 = 1 - self.e.var()/self.y.var()             # model R-squared
    self.R2adj = 1-(1-self.R2)*((self.nobs-1)/(self.nobs-self.ncoef))   # adjusted R-square

    self.F = (self.R2/self.df_r) / ((1-self.R2)/self.df_e)  # model F-statistic
    self.Fpv = 1-stats.f.cdf(self.F, self.df_r, self.df_e)  # F-statistic p-value
Developer: aybose, Project: riskdash, Lines: 27, Source: OLS.py
Example 19: _LMLgrad_lik
def _LMLgrad_lik(self,hyperparams):
    """derivative of the likelihood parameters"""
    logtheta = hyperparams['covar']
    try:
        KV = self.get_covariances(hyperparams)
    except linalg.LinAlgError:
        LG.error("exception caught (%s)" % (str(hyperparams)))
        return 1E6

    #loop through all dimensions
    #logdet term:
    Kd = 2*KV['Knoise']
    dldet = 0.5*(Kd*KV['Si']).sum(axis=0)
    #quadratic term
    y_roti = KV['y_roti']
    dlquad = -0.5 * (y_roti * Kd * y_roti).sum(axis=0)
    if VERBOSE:
        dldet_ = SP.zeros([self.d])
        dlquad_ = SP.zeros([self.d])
        for d in xrange(self.d):
            _K = KV['K'] + SP.diag(KV['Knoise'][:,d])
            _Ki = SP.linalg.inv(_K)
            dldet_[d] = 0.5* SP.dot(_Ki,SP.diag(Kd[:,d])).trace()
            dlquad_[d] = -0.5*SP.dot(self.y[:,d],SP.dot(_Ki,SP.dot(SP.diag(Kd[:,d]),SP.dot(_Ki,self.y[:,d]))))

        assert (SP.absolute(dldet-dldet_)<1E-3).all(), 'outch'
        assert (SP.absolute(dlquad-dlquad_)<1E-3).all(), 'outch'

    LMLgrad = dldet + dlquad
    RV = {'lik': LMLgrad}

    return RV
Developer: AngelBerihuete, Project: pygp, Lines: 34, Source: gplvm_ard.py
Example 20: multivariateNormalPdf
def multivariateNormalPdf(z, x, sigma):
    """ The pdf of a multivariate normal distribution (not in scipy).
    The sample z and the mean x should be 1-dim-arrays, and sigma a square 2-dim-array. """
    assert len(z.shape) == 1 and len(x.shape) == 1 and len(x) == len(z) and sigma.shape == (len(x), len(z))
    tmp = -0.5 * dot(dot((z - x), inv(sigma)), (z - x))
    res = (1. / power(2.0 * pi, len(z) / 2.)) * (1. / sqrt(det(sigma))) * exp(tmp)
    return res
Developer: DanSGraham, Project: School-Projects, Lines: 7, Source: functions.py
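A hypothetical check of multivariateNormalPdf against scipy.stats.multivariate_normal (available since SciPy 0.14); the test point, mean, and covariance below are arbitrary, and the names used inside the function (dot, inv, det, power, sqrt, exp, pi) are assumed to be imported from scipy/numpy as in the original module.

import numpy as np
from scipy.stats import multivariate_normal

mean = np.array([0.5, -1.0])
sigma = np.array([[2.0, 0.3],
                  [0.3, 1.0]])
z = np.array([0.1, -0.7])

p = multivariateNormalPdf(z, mean, sigma)
assert np.isclose(p, multivariate_normal.pdf(z, mean=mean, cov=sigma))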
Note: the scipy.dot examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Do not republish without permission.