This article collects typical usage examples of the numpy.dual.inv function in Python. If you have been wondering how the Python inv function works, how to call it, or what it looks like in real code, the hand-picked examples below may help.
Twenty code examples of the inv function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
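Before diving into the examples, a quick orientation: numpy.dual.inv computes a matrix inverse, dispatching to an optimized backend (SciPy's LAPACK bindings) when SciPy is installed and falling back to numpy.linalg otherwise. A minimal sketch of the basic call (note that the numpy.dual module was deprecated in NumPy 1.20 and removed in later releases, so on current NumPy you would import inv from numpy.linalg instead):

import numpy as np
from numpy.dual import inv  # on NumPy >= 1.20, use: from numpy.linalg import inv

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
A_inv = inv(A)                                   # matrix inverse via LAPACK
print(np.allclose(np.dot(A, A_inv), np.eye(2)))  # True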
Example 1: fishersLinearDiscriminent
def fishersLinearDiscriminent(trainData, trainLabels, testData, testLabels):
    numClasses = max(trainLabels) + 1
    N = [0] * numClasses
    m = [0] * numClasses
    for x, t in izip(trainData, trainLabels):
        m[t] += x
        N[t] += 1
    for i in range(numClasses):
        m[i] /= N[i]
    Sw = zeros((trainData.shape[1], trainData.shape[1]))
    for x, t in izip(trainData, trainLabels):
        Sw += outer(x - m[t], x - m[t])
    try:
        inv(Sw)
    except LinAlgError:
        Sw += 0.1 * identity(Sw.shape[0], Float64)
    w = dot(inv(Sw), (m[0] - m[1]))
    meanVect = (N[0] * m[0] + N[1] * m[1]) / sum(N)
    numCorrect = 0
    for x, t in izip(testData, testLabels):
        if dot(w, (x - meanVect)) > 0:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
Author: Primer42, Project: TuftComp136, Lines: 29, Source: main.py
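The try: inv(Sw) ... except LinAlgError pattern in this example recurs throughout this page: probe whether the scatter matrix is invertible and, if not, add a small multiple of the identity (a ridge/Tikhonov term) before inverting. A standalone sketch of the idiom, written against numpy.linalg so it runs on current NumPy (the 0.1 ridge factor mirrors the example above but is otherwise arbitrary):

import numpy as np
from numpy.linalg import inv, LinAlgError

def safe_inv(S, ridge=0.1):
    # invert S, adding ridge * I first if S is singular
    try:
        return inv(S)
    except LinAlgError:
        # a small diagonal bump makes the matrix nonsingular
        return inv(S + ridge * np.identity(S.shape[0]))

S = np.array([[1.0, 2.0],
              [2.0, 4.0]])  # rank deficient; inv(S) raises LinAlgError
print(safe_inv(S))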
Example 2: calcN
def calcN(classKernels, trainLabels):
    N = zeros((len(trainLabels), len(trainLabels)))
    for i, l in enumerate(unique(trainLabels)):
        numExamplesWithLabel = len(where(trainLabels == l)[0])
        Idiff = identity(numExamplesWithLabel, Float64) - (1.0 / numExamplesWithLabel) * ones(numExamplesWithLabel, Float64)
        firstDot = dot(classKernels[i], Idiff)
        labelTerm = dot(firstDot, transpose(classKernels[i]))
        N += labelTerm
    N = nan_to_num(N)
    #make N more numerically stable
    #if I had more time, I would train this parameter, but I don't
    additionToN = ((mean(diag(N)) + 1) / 100.0) * identity(N.shape[0], Float64)
    N += additionToN
    #make sure N is invertible
    for i in range(1000):
        try:
            inv(N)
        except LinAlgError:
            #doing this to make sure the matrix is invertible
            #large value supported by the section titled
            #"numerical issues and regularization" in the paper
            N += additionToN
    return N
Author: Primer42, Project: TuftComp136, Lines: 25, Source: main.py
Example 3: estimateGaussian
def estimateGaussian(trainData, trainLabels):
    numClasses = max(trainLabels) + 1
    N = [0] * numClasses
    mu = [0.0] * numClasses
    #a list comprehension is needed here: [zeros(...)] * numClasses would
    #alias one shared array across all classes, breaking the += below
    Slist = [zeros((trainData.shape[1], trainData.shape[1])) for _ in range(numClasses)]
    pList = [0.0] * numClasses
    #calculate N, and sum x's for mu
    for x, t in izip(trainData, trainLabels):
        N[t] += 1
        mu[t] += x
    #normalize mu
    for i in range(numClasses):
        mu[i] = mu[i] / float(N[i])
    #calculate the class probabilities
    for i in range(numClasses):
        pList[i] = float(N[i]) / sum(N)
    #calculate S0 and S1
    for x, t in izip(trainData, trainLabels):
        Slist[t] += outer(x - mu[t], x - mu[t])
    try:
        inv(Slist[t])
    except LinAlgError:
        Slist[t] += 0.1 * identity(Slist[t].shape[0], Float64)
    return (numClasses, N, mu, Slist, pList)
Author: Primer42, Project: TuftComp136, Lines: 26, Source: main.py
Example 4: _cov2wt
def _cov2wt(self, cov):
    """ Convert covariance matrix(-ices) to weights.
    """
    from numpy.dual import inv
    if len(cov.shape) == 2:
        return inv(cov)
    else:
        weights = numpy.zeros(cov.shape, float)
        for i in range(cov.shape[-1]):  # n
            weights[:, :, i] = inv(cov[:, :, i])
        return weights
Author: beiko-lab, Project: gengis, Lines: 15, Source: odrpack.py
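A usage sketch for the conversion above, with made-up data (in scipy.odr the 3-D case stores one covariance matrix per observation along the last axis, and each slice is inverted independently):

import numpy as np
from numpy.linalg import inv  # numpy.dual.inv behaves the same where available

n = 4  # number of observations
cov = np.tile(np.array([[0.5, 0.1],
                        [0.1, 0.2]])[:, :, np.newaxis], (1, 1, n))
weights = np.zeros_like(cov)
for i in range(cov.shape[-1]):
    weights[:, :, i] = inv(cov[:, :, i])  # weight matrix = inverse covariance
print(weights[:, :, 0])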
Example 5: factor_target_pose
def factor_target_pose(world_homography, target_homography):
    """
    Find the rigid transformation that maps target coordinates to world coordinates,
    given homographies that map world coordinates and target coordinates to a common
    coordinate system (i.e., the camera).
    """
    return extract_transformation(np.dot(inv(world_homography), target_homography))
Author: tfmartino, Project: STOcapstone-calibration, Lines: 7, Source: calibration.py
Example 6: doubleU
def doubleU(phi, l, tVector):
    #can't call lambda by its name, because that's a reserved word in python,
    #so I'm calling it l
    lIdentity = l * identity(phi.shape[1])
    phiDotPhi = dot(phi.transpose(), phi)
    firstTerm = inv(lIdentity + phiDotPhi)
    phiDotT = dot(phi.transpose(), tVector)
    return squeeze(dot(firstTerm, phiDotT))
Author: Primer42, Project: TuftComp136, Lines: 8, Source: main.py
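doubleU computes the regularized least-squares (ridge regression) weight vector w = (λI + ΦᵀΦ)⁻¹Φᵀt, in the notation of Bishop's PRML. A small usage sketch with a made-up design matrix (the data here is illustrative only):

import numpy as np
from numpy.linalg import inv

def doubleU(phi, l, tVector):
    lIdentity = l * np.identity(phi.shape[1])
    firstTerm = inv(lIdentity + np.dot(phi.T, phi))
    return np.squeeze(np.dot(firstTerm, np.dot(phi.T, tVector)))

phi = np.array([[1.0, 0.0],   # first column: bias feature
                [1.0, 1.0],
                [1.0, 2.0]])
t = np.array([0.1, 0.9, 2.1])
print(doubleU(phi, 0.01, t))  # roughly [0.04, 1.0] for this data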
Example 7: trainKFD
def trainKFD(trainKernel, trainLabels):
    classKernels = getClassKernels(trainKernel, trainLabels)
    M = calcM(classKernels, trainLabels)
    N = calcN(classKernels, trainLabels)
    '''
    print "train kernel:", trainKernel
    print "Class kernels:", classKernels
    print "M", M
    print "N", N
    '''
    try:
        solutionMatrix = dot(inv(N), M)
    except LinAlgError:
        #if we get a singular matrix here, there isn't much we can do about it,
        #so just skip this configuration
        solutionMatrix = identity(N.shape[0], Float64)
    solutionMatrix = nan_to_num(solutionMatrix)
    eVals, eVects = eig(solutionMatrix)
    #find the 'leading' term, i.e. the eigenvector with the highest eigenvalue
    alphaVect = eVects[:, absolute(eVals).argmax()].real.astype(Float64)
    trainProjections = dot(trainKernel, alphaVect)
    '''
    print 'alpha = ', alphaVect
    print 'train kernel = ', trainKernel
    print 'train projection = ', trainProjections
    '''
    #train sigmoid based on evaluation accuracy
    #accuracyError = lambda x: 100.0 - evaluations(trainLabels, classifyKFDValues(trainProjections, *list(x)))[0]
    accuracyError = lambda x: 100.0 - evaluations(trainLabels, classifyKFDValues(trainProjections, *x))[0]
    #get an initial guess by brute force
    #ranges = ((-100, 100, 1), (-100, 100, 1))
    #x0 = brute(accuracyError, ranges)
    #popt = minimize(accuracyError, x0.tolist(), method="Powell").x
    rc = LSFAIL
    niter = 0
    i = 0
    while rc in (LSFAIL, INFEASIBLE, CONSTANT, NOPROGRESS, USERABORT, MAXFUN) or niter <= 1:
        if i == 10:
            break
        #get a 'smarter' x0
        #ranges = ((-1000, 1000, 100), (-1000, 1000, 100))
        ranges = ((-10**(i + 1), 10**(i + 1), 10**i),) * 2
        x0 = brute(accuracyError, ranges)
        (popt, niter, rc) = fmin_tnc(accuracyError, x0, approx_grad=True)
        #popt = fmin_tnc(accuracyError, x0.tolist(), approx_grad=True)[0]
        i += 1
    return (alphaVect, popt)
Author: Primer42, Project: TuftComp136, Lines: 52, Source: main.py
Example 8: generativeSharedCov
def generativeSharedCov(trainData, trainLabels, testData, testLabels):
    (numClasses, N, mu, Slist, pList) = estimateGaussian(trainData, trainLabels)
    #i.e. calculate everything we need for the model
    #normalize the S's, and calculate the final S
    S = zeros(Slist[0].shape)
    for i in range(numClasses):
        Slist[i] = Slist[i] / float(N[i])
        S += pList[i] * Slist[i]
    w = dot(inv(S), (mu[1] - mu[0]))
    w0 = -0.5 * dot(dot(mu[1], inv(S)), mu[1]) + 0.5 * dot(dot(mu[0], inv(S)), mu[0]) + log(pList[1] / pList[0])
    numCorrect = 0
    for x, t in izip(testData, testLabels):
        probClass1 = sigmoid(dot(w, x) + w0)
        if probClass1 >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
Author: Primer42, Project: TuftComp136, Lines: 22, Source: main.py
Example 9: logisticRegression
def logisticRegression(trainData, trainLabels, testData, testLabels):
    #adjust the data, adding the 'free parameter' to the train data
    trainDataWithFreeParam = hstack((trainData.copy(), ones(trainData.shape[0])[:, newaxis]))
    testDataWithFreeParam = hstack((testData.copy(), ones(testData.shape[0])[:, newaxis]))
    alpha = 10
    oldW = zeros(trainDataWithFreeParam.shape[1])
    newW = ones(trainDataWithFreeParam.shape[1])
    iteration = 0
    trainDataWithFreeParamTranspose = transpose(trainDataWithFreeParam)
    alphaI = alpha * identity(oldW.shape[0])
    while not array_equal(oldW, newW):
        if iteration == 100:
            break
        oldW = newW.copy()
        yVect = yVector(oldW, trainDataWithFreeParam)
        r = R(yVect)
        firstTerm = inv(alphaI + dot(dot(trainDataWithFreeParamTranspose, r), trainDataWithFreeParam))
        secondTerm = dot(trainDataWithFreeParamTranspose, (yVect - trainLabels)) + alpha * oldW
        newW = oldW - dot(firstTerm, secondTerm)
        iteration += 1
    #see how well we did
    numCorrect = 0
    for x, t in izip(testDataWithFreeParam, testLabels):
        if yScalar(newW, x) >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
Author: Primer42, Project: TuftComp136, Lines: 38, Source: main.py
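This snippet relies on the helpers yVector, R, and yScalar defined elsewhere in the same main.py, which this page does not show. A plausible reconstruction, assuming the standard IRLS formulation of logistic regression that the update formula above implies (these definitions are an inference, not the author's verbatim code):

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def yScalar(w, x):
    # predicted probability of class 1 for a single example
    return sigmoid(np.dot(w, x))

def yVector(w, data):
    # predicted probabilities for every row of the design matrix
    return sigmoid(np.dot(data, w))

def R(yVect):
    # IRLS weighting matrix: diag(y_n * (1 - y_n))
    return np.diag(yVect * (1.0 - yVect))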
Example 10: back_project
def back_project(homography, points):
    return transform(inv(homography), points)
Author: tfmartino, Project: STOcapstone-calibration, Lines: 2, Source: cornerdetect.py
Example 11: leastsq
def leastsq(self, **kws):
    """
    Use Levenberg-Marquardt minimization to perform the fit.
    This assumes that ModelParameters have been stored, and a function
    to minimize has been properly set up.
    This wraps scipy.optimize.leastsq, and keyword arguments are passed
    directly as options to scipy.optimize.leastsq.
    When possible, this calculates the estimated uncertainties and
    variable correlations from the covariance matrix.
    Writes outputs to many internal attributes, and returns
    True if the fit was successful, False if not.
    """
    self.prepare_fit()
    lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
                 gtol=1.e-7, maxfev=2000*(self.nvarys+1), Dfun=None)
    lskws.update(self.kws)
    lskws.update(kws)
    if lskws['Dfun'] is not None:
        self.jacfcn = lskws['Dfun']
        lskws['Dfun'] = self.__jacobian
    # suppress runtime warnings during fit and error analysis
    orig_warn_settings = np.geterr()
    np.seterr(all='ignore')
    lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
    _best, _cov, infodict, errmsg, ier = lsout
    self.residual = resid = infodict['fvec']
    self.ier = ier
    self.lmdif_message = errmsg
    self.message = 'Fit succeeded.'
    self.success = ier in [1, 2, 3, 4]
    if ier == 0:
        self.message = 'Invalid Input Parameters.'
    elif ier == 5:
        self.message = self.err_maxfev % lskws['maxfev']
    else:
        self.message = 'Tolerance seems to be too small.'
    self.nfev = infodict['nfev']
    self.ndata = len(resid)
    sum_sqr = (resid**2).sum()
    self.chisqr = sum_sqr
    self.nfree = (self.ndata - self.nvarys)
    self.redchi = sum_sqr / self.nfree
    # need to map _best values to params, then calculate the
    # grad for the variable parameters
    grad = ones_like(_best)
    vbest = ones_like(_best)
    # ensure that _best, vbest, and grad are not
    # broken 1-element ndarrays.
    if len(np.shape(_best)) == 0:
        _best = np.array([_best])
    if len(np.shape(vbest)) == 0:
        vbest = np.array([vbest])
    if len(np.shape(grad)) == 0:
        grad = np.array([grad])
    for ivar, varname in enumerate(self.var_map):
        par = self.params[varname]
        grad[ivar] = par.scale_gradient(_best[ivar])
        vbest[ivar] = par.value
    # modified from JJ Helmus' leastsqbound.py
    infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                 take(grad, infodict['ipvt'] - 1))
    rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
               take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
    try:
        self.covar = inv(dot(transpose(rvec), rvec))
    except (LinAlgError, ValueError):
        self.covar = None
    has_expr = False
    for par in self.params.values():
        par.stderr, par.correl = 0, None
        has_expr = has_expr or par.expr is not None
    if self.covar is not None:
        if self.scale_covar:
            self.covar = self.covar * sum_sqr / self.nfree
        for ivar, varname in enumerate(self.var_map):
            par = self.params[varname]
            par.stderr = sqrt(self.covar[ivar, ivar])
            par.correl = {}
            for jvar, varn2 in enumerate(self.var_map):
                if jvar != ivar:
                    par.correl[varn2] = (self.covar[ivar, jvar] /
                                         (par.stderr * sqrt(self.covar[jvar, jvar])))
#......... (part of the code omitted here) .........
Author: nmearl, Project: lmfit-py, Lines: 101, Source: minimizer.py
Example 12: estimate_poses
def estimate_poses(params, homographies):
    K_inv = inv(params)
    return [extract_transformation(np.dot(K_inv, H)) for H in homographies]
Author: tfmartino, Project: STOcapstone-calibration, Lines: 3, Source: calibration.py
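extract_transformation is defined elsewhere in calibration.py and is not shown on this page. In Zhang-style camera calibration, the columns of K⁻¹H are, up to scale, the first two columns of the rotation matrix and the translation vector. A hedged sketch of what such a factorization typically looks like (an inference from the call sites above, not the repository's verbatim code):

import numpy as np

def extract_transformation(A):
    # A ~ [r1 r2 t] up to scale, as in Zhang's calibration method
    h1, h2, h3 = A[:, 0], A[:, 1], A[:, 2]
    lam = 1.0 / np.linalg.norm(h1)   # scale fixed by the constraint ||r1|| = 1
    r1, r2 = lam * h1, lam * h2
    r3 = np.cross(r1, r2)            # complete the right-handed frame
    R = np.column_stack((r1, r2, r3))
    t = lam * h3
    return R, t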
Example 13: leastsqBound
#......... (part of the code omitted here) .........
    x0 = np.asarray(x0).flatten()
    n = len(x0)
    if len(bounds) != n:
        raise ValueError('the length of bounds is inconsistent with the number of parameters')
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = np.finfo(dtype).eps

    def funcWarp(x, *args):
        return func(i2e(x), *args)

    xi0 = e2i(x0)
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100*(n + 1)

        def DfunWarp(x, *args):
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    info = retval[-1]  # the FORTRAN return value
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            np.warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])
    mesg = errors[info][0]
    x = i2e(retval[0])
    if full_output:
        grad = _int2extGrad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T / np.take(grad,
                             retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0)
            r = np.triu(np.transpose(retval[1]['fjac'])[:n, :])
            R = np.dot(r, perm)
            try:
                cov_x = inv(np.dot(np.transpose(R), R))
            except LinAlgError as inverror:
                print(inverror)
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)
Author: puwe, Project: DAMASK, Lines: 101, Source: util.py
Example 14: approx_covar
def approx_covar(hess, red_chi2):
    return red_chi2 * inv(hess / 2.)
Author: pyfit, Project: pyfit, Lines: 2, Source: hessian.py
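Here the parameter covariance is approximated as the reduced chi-square times the inverse of half the Hessian of the chi-square surface. A toy check on a quadratic surface, with a central-difference Hessian (the numerical_hessian helper is illustrative and not part of pyfit):

import numpy as np
from numpy.linalg import inv

def approx_covar(hess, red_chi2):
    return red_chi2 * inv(hess / 2.)

def numerical_hessian(f, x, h=1e-4):
    # central-difference Hessian of a scalar function f at x
    n = len(x)
    H = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            ei = np.zeros(n); ei[i] = h
            ej = np.zeros(n); ej[j] = h
            H[i, j] = (f(x + ei + ej) - f(x + ei - ej)
                       - f(x - ei + ej) + f(x - ei - ej)) / (4 * h * h)
    return H

chi2 = lambda p: 4.0 * p[0]**2 + p[1]**2      # toy chi-square surface
H = numerical_hessian(chi2, np.zeros(2))      # approximately diag(8, 2)
print(approx_covar(H, red_chi2=1.0))          # approximately diag(0.25, 1.0)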
Example 15: leastsq
#......... (part of the code omitted here) .........
      ftol -- Relative error desired in the sum of squares.
      xtol -- Relative error desired in the approximate solution.
      gtol -- Orthogonality desired between the function vector
              and the columns of the Jacobian.
      maxfev -- The maximum number of calls to the function. If zero,
                then 100*(N+1) is the maximum where N is the number
                of elements in x0.
      epsfcn -- A suitable step length for the forward-difference
                approximation of the Jacobian (for Dfun=None). If
                epsfcn is less than the machine precision, it is assumed
                that the relative errors in the functions are of
                the order of the machine precision.
      factor -- A parameter determining the initial step bound
                (factor * || diag * x||). Should be in the interval (0.1, 100).
      diag -- A sequence of N positive entries that serve as
              scale factors for the variables.

    Remarks:
      "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    See also:
      scikits.openopt, which offers a unified syntax to call this and other solvers
      fmin, fmin_powell, fmin_cg,
      fmin_bfgs, fmin_ncg -- multivariate local optimizers
      fmin_l_bfgs_b, fmin_tnc,
      fmin_cobyla -- constrained multivariate optimizers
      anneal, brute -- global optimizers
      fminbound, brent, golden, bracket -- local scalar minimizers
      fsolve -- n-dimensional root-finding
      brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding
      fixed_point -- scalar and vector fixed-point finder
      curve_fit -- find parameters for a curve-fitting problem.
    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    m = check_func(func, x0, args, n)[0]
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n+1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun, x0, args, n, (n, m))
        else:
            check_func(Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100*(n+1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in the sum of squares\n are at most %f and the relative error between two consecutive iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any column of the\n Jacobian is at most %f in absolute value" % gtol, None],
              5: ["Number of calls to function has reached maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction in the sum of squares\n is possible." % ftol, ValueError],
              7: ["xtol=%f is too small, no further improvement in the approximate\n solution is possible." % xtol, ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the columns of\n the Jacobian to machine precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    info = retval[-1]  # the FORTRAN return value
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            if warning:
                print "Warning: " + errors[info][0]
        else:
            try:
                raise errors[info][1], errors[info][0]
            except KeyError:
                raise errors['unknown'][1], errors['unknown'][0]
    if n == 1:
        retval = (retval[0][0],) + retval[1:]
    mesg = errors[info][0]
    if full_output:
        from numpy.dual import inv
        from numpy.linalg import LinAlgError
        perm = take(eye(n), retval[1]['ipvt']-1, 0)
        r = triu(transpose(retval[1]['fjac'])[:n, :])
        R = dot(r, perm)
        try:
            cov_x = inv(dot(transpose(R), R))
        except LinAlgError:
            cov_x = None
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
Author: stefanv, Project: scipy3, Lines: 101, Source: minpack.py
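The covariance handling at the end of this example is what callers see when passing full_output=1: the returned cov_x is inv(RᵀR) from the QR factorization of the final Jacobian, and it must still be scaled by the residual variance to become the parameter covariance. A minimal usage sketch against the public scipy.optimize.leastsq API (the model and data are made up):

import numpy as np
from scipy.optimize import leastsq

xdata = np.linspace(0.0, 1.0, 20)
ydata = 2.0 * xdata + 1.0 + 0.01 * np.sin(30.0 * xdata)  # nearly linear data

def residuals(p):
    return ydata - (p[0] * xdata + p[1])

p, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0], full_output=1)
# scale cov_x by the reduced chi-square to get the parameter covariance
s_sq = (residuals(p) ** 2).sum() / (len(xdata) - len(p))
param_err = np.sqrt(np.diag(cov_x * s_sq))
print(p, param_err)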
Example 16: leastsq
#......... (part of the code omitted here) .........
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        - 'ipvt' : an integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        - 'qtf' : the vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
    """
    if not warning:
        msg = "The warning keyword is deprecated. Use the warnings module."
        warnings.warn(msg, DeprecationWarning)
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    m = check_func(func, x0, args, n)[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n+1)
        retval = _minpack._lmdif(func, x0, args, full_output,
                                 ftol, xtol, gtol,
                                 maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            check_func(Dfun, x0, args, n, (n, m))
        else:
            check_func(Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100*(n+1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible." % ftol, ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol, ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    info = retval[-1]  # the FORTRAN return value
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])
    if n == 1:
        retval = (retval[0][0],) + retval[1:]
    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt']-1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
Author: e-johnson, Project: AndroidProject, Lines: 101, Source: minpack.py
Example 17: SN
def SN(alpha, beta, phi):
    betaPhiTphi = beta * dot(phi.transpose(), phi)
    alphaI = alpha * identity(betaPhiTphi.shape[0])
    SNinverse = alphaI + betaPhiTphi
    return inv(SNinverse)
Author: Primer42, Project: TuftComp136, Lines: 5, Source: main.py
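SN is the posterior covariance of Bayesian linear regression, S_N⁻¹ = αI + βΦᵀΦ (PRML eq. 3.54). Its usual companion is the posterior mean m_N = β S_N Φᵀt; a sketch of the pair (the mN helper follows the textbook formula and is not part of the code shown on this page):

import numpy as np
from numpy.linalg import inv

def SN(alpha, beta, phi):
    # posterior covariance: (alpha*I + beta * Phi^T Phi)^(-1)
    return inv(alpha * np.identity(phi.shape[1]) + beta * np.dot(phi.T, phi))

def mN(alpha, beta, phi, t):
    # posterior mean: beta * S_N * Phi^T * t   (PRML eq. 3.53)
    return beta * np.dot(SN(alpha, beta, phi), np.dot(phi.T, t))

phi = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
t = np.array([0.0, 1.0, 2.0])
print(mN(0.1, 10.0, phi, t))  # close to [0, 1] for this data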
Example 18: lmdif
#......... (part of the code omitted here) .........
                for i in range(m):
                    fvec[i] = wa4[i]
                xnorm = enorm(wa2)
                if utility.wm_trace:
                    print(" √ ||x|| ↓ %.10f -> %.10f" %
                          (fnorm - fnorm1, fnorm1))
                fnorm = fnorm1
                iter += 1
            elif utility.wm_trace:
                print(" × ||x|| not changed")
            # > test for convergence
            if np.abs(act_red) <= ftol and pre_red <= ftol \
                    and p5 * ratio <= 1.0:
                ier = 1
            if delta <= xtol * xnorm:
                ier = 2
            if np.abs(act_red) <= ftol and pre_red <= ftol \
                    and p5 * ratio <= 1.0 and ier is 2:
                ier = 3
            if ier is not 0:
                break
            # > test for termination and stringent tolerances
            if nfev >= maxfev:
                ier = 5
            if np.abs(act_red) <= eps_machine and pre_red <= \
                    eps_machine and p5 * ratio <= 1.0:
                ier = 6
            if delta <= eps_machine * xnorm:
                ier = 7
            if gnorm <= eps_machine:
                ier = 8
            if ier is not 0:
                break
            tmp = 1
            if ratio >= p0001:
                break
        if ier is not 0:
            break
    # endregion : Main loop

    # > wrap results
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares are at most %f * 1e-8" %
                  (ftol * 1e8), None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f * 1e-8" % (xtol * 1e8), None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    if ier not in [1, 2, 3, 4] and not full_output:
        if ier in [5, 6, 7, 8]:
            print("!!! leastsq warning: %s" % errors[ier][0])
    mesg = errors[ier][0]
    if utility.wm_trace:
        print(">>> " + mesg)
    if full_output:
        cov_x = None
        if ier in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), ipvt - 1, 0)
            r = triu(transpose(fjac.reshape(n, m))[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        dct = {'fjac': fjac, 'fvec': fvec, 'ipvt': ipvt,
               'nfev': nfev, 'qtf': qtf}
        return x, cov_x, dct, mesg, ier
    else:
        return x, ier
Author: WilliamRo, Project: CLipPYME, Lines: 101, Source: leastsq.py
Example 19: leastsqbound
#......... (part of the code omitted here) .........
    i2e = _internal2external_func(bounds)
    e2i = _external2internal_func(bounds)
    x0 = asarray(x0).flatten()
    i0 = e2i(x0)
    n = len(x0)
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps

    # define a wrapped func which accepts internal parameters, converts them
    # to external parameters and calls func
    def wfunc(x, *args):
        return func(i2e(x), *args)

    if Dfun is None:
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)

        def wDfun(x, *args):  # wrapped Dfun
            return Dfun(i2e(x), *args)

        retval = _minpack._lmder(wfunc, wDfun, i0, args, full_output,
                                 col_deriv, ftol, xtol, gtol, maxfev,
                                 factor, diag)

    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible." % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    info = retval[-1]  # the FORTRAN return value
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])
    mesg = errors[info][0]
    x = i2e(retval[0])  # internal params to external params
    if full_output:
        # convert fjac from internal params to external
        grad = _internal2external_grad(retval[0], bounds)
        retval[1]['fjac'] = (retval[1]['fjac'].T / take(grad,
                             retval[1]['ipvt'] - 1)).T
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except LinAlgError:
                pass
        return (x, cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (x, info)
Author: nealegibson, Project: Infer, Lines: 101, Source: leastsqbound.py
Example 20: leastsq
def leastsq(self, scale_covar=True, **kws):
    """
    Use Levenberg-Marquardt minimization to perform the fit.
    This assumes that ModelParameters have been stored, and a function
    to minimize has been properly set up.
    This wraps scipy.optimize.leastsq, and keyword arguments are passed
    directly as options to scipy.optimize.leastsq.
    When possible, this calculates the estimated uncertainties and
    variable correlations from the covariance matrix.
    Writes outputs to many internal attributes, and returns
    True if the fit was successful, False if not.
    """
    # print 'RUNNING LEASTSQ'
    self.prepare_fit()
    lskws = dict(full_output=1, xtol=1.0e-7, ftol=1.0e-7, gtol=1.0e-7,
                 maxfev=2000 * (self.nvarys + 1), Dfun=None)
    lskws.update(self.kws)
    lskws.update(kws)
    if lskws["Dfun"] is not None:
        self.jacfcn = lskws["Dfun"]
        lskws["Dfun"] = self.__jacobian
    lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
    _best, _cov, infodict, errmsg, ier = lsout
    self.residual = resid = infodict["fvec"]
    self.ier = ier
    self.lmdif_message = errmsg
    self.message = "Fit succeeded."
    self.success = ier in [1, 2, 3, 4]
    if ier == 0:
        self.message = "Invalid Input Parameters."
    elif ier == 5:
        self.message = self.err_maxfev % lskws["maxfev"]
    else:
        self.message = "Tolerance seems to be too small."
    self.nfev = infodict["nfev"]
    self.ndata = len(resid)
    sum_sqr = (resid ** 2).sum()
    self.chisqr = sum_sqr
    self.nfree = self.ndata - self.nvarys
    self.redchi = sum_sqr / self.nfree
    # need to map _best values to params, then calculate the
    # grad for the variable parameters
    grad = ones_like(_best)
    vbest = ones_like(_best)
    for ivar, varname in enumerate(self.var_map):
        par = self.params[varname]
        grad[ivar] = par.scale_gradient(_best[ivar])
        vbest[ivar] = par.value
    # modified from JJ Helmus' leastsqbound.py
    infodict["fjac"] = transpose(transpose(infodict["fjac"]) /
                                 take(grad, infodict["ipvt"] - 1))
#......... (part of the code omitted here) .........