This article collects typical usage examples of Python's scipy.reshape function. If you are wondering what reshape does, how to call it, or how it is used in real code, the hand-picked examples below may help.
Listed below are 20 code examples of the reshape function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
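Before the examples, a minimal sketch of the call itself (illustrative values only). scipy.reshape was simply a re-export of numpy.reshape and has been deprecated in recent SciPy releases, so new code should prefer numpy.reshape or the array's .reshape method; the snippets below come from older code bases where the scipy alias still worked.

import numpy as np

a = np.arange(6)              # array([0, 1, 2, 3, 4, 5])
m = np.reshape(a, (2, 3))     # 2 rows, 3 columns, same underlying data
col = np.reshape(a, (-1, 1))  # -1 lets NumPy infer the size: a 6x1 column vector
flat = m.reshape(-1)          # method form, back to 1-D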
Example 1: MNEfit
def MNEfit(stim,resp,order):
# in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
# == calculate the constrained averages over the data set
Nsamples = sp.size(stim,0)
Ndim = sp.size(stim,1)
psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
avg = (1.0*stim.T*resp)/(Nsamples*1.0)
avgs = sp.vstack((psp,avg))
if(order > 1):
avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
avgs = sp.vstack((avgs,avgsqrd))
#initialize params:
pstart = sp.log(1/avgs[0,0] - 1)
pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
if(order > 1):
temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
#redefine functions with fixed vals:
def logLoss(p):
return LLF.log_loss(p, stim, resp, order)
def dlogLoss(p):
return LLF.d_log_loss(p, stim, avgs, order)
#run the function:
#pfinal = opt.fmin_tnc(logLoss,pstart,fprime=dlogLoss)
# conjugate-gradient:
pfinal = opt.fmin_cg(logLoss,pstart,fprime=dlogLoss)
#pfinal = opt.fmin(logLoss,pstart,fprime=dlogLoss)
return pfinal
Developer: MarvinT | Project: pyMNE | Lines: 31 | Source: MNEfit.py
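A hedged usage sketch, not taken from the pyMNE repository: the expression 1.0*stim.T*resp only makes sense as a matrix product, so stim and resp are evidently NumPy matrices of shape (Nsamples, Ndim) and (Nsamples, 1), and sp, opt and LLF are assumed to be scipy, scipy.optimize and the project's log-loss helper module. The shapes and the 0.8 threshold below are made up.

import scipy as sp
stim = sp.mat(sp.random.rand(500, 16))               # 500 samples of a 16-dimensional stimulus
resp = sp.mat((sp.random.rand(500, 1) > 0.8) * 1.0)  # binary spike indicator per sample
params = MNEfit(stim, resp, order=2)                 # order=2 also fits the second-order constraints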
Example 2: KDTForest_ErrorCor
def KDTForest_ErrorCor(galleryL, probesL, ground_truth, K=3, forest_size=6,
numForests=5, binary_score=True):
Np = len(probesL)
gx = sp.array(galleryL)
px = sp.array(probesL)
ground_truth = sp.reshape(ground_truth,(Np,K)) if K==1 else ground_truth
forests = []
    print("Building %d separate KDT Forests from input data..." % numForests)
for _idx in range(numForests):
f = pyf.FLANN()
_params = f.build_index(gx,algorithm='kdtree', trees=forest_size)
forests.append(f)
errs = []
    print("Testing %d Probe Points across %d KDT Forests" % (Np, numForests))
for f in forests:
        print(".", end="")
sys.stdout.flush()
[res, _ds] = f.nn_index(px, K)
if K==1:
res = sp.reshape(res,(Np,1))
err_vec = compute_errors(res, ground_truth, binary_score=binary_score)
errs.append( sp.reshape(err_vec, (Np,1) ) )
    print("")
ErrM = sp.hstack(errs)
return ErrM
Developer: Sciumo | Project: ProximityForest | Lines: 30 | Source: ANN_ErrorCorrelations.py
Example 3: steepest_descent
def steepest_descent(A, b, x0, tol=1e-8):
"""
Uses the steepest descent method to find the x that satisfies Ax = b.
Inputs:
A: An m x n NumPy array
b: An m x 1 NumPy array
x0: An n x 1 NumPy array that represents the initial guess at a
solution.
tol (optional): The tolerance level for convergence. This is compared
against the norm(x_n+1 - x_n) each iteration.
Outputs:
x: The x that satisfies the equation.
"""
A = sp.mat(A)
b = sp.reshape(sp.mat(b),(b.size,1))
def grad(A, b, x):
"""
        Find the gradient of ||Ax - b||**2
        Inputs:
            A: An m x n NumPy matrix.
            b: An m x 1 NumPy matrix.
            x: An n x 1 NumPy matrix.
        Outputs:
            grad: A NumPy matrix representing the gradient of ||Ax - b||**2
"""
return np.mat(2 * A.T*(A*x - b))
def solve_alpha_k(A, b, x):
"""
Solves for alpha in the steepest descent algorithm
x_n+1 = x_n - alpha * grad(x_n)
Inputs:
A: An m x n NumPy array
b: An m x 1 NumPy array
            x: The point at which the step size alpha is computed.
Outputs:
alpha: The alpha satisfying the algorithm above.
"""
gradient = grad(A, b, x)
return np.array(
(gradient.T * gradient)/(2 * gradient.T * A.T * A * gradient))[0]
xold = sp.reshape(sp.mat(x0),(x0.size,1))
xnew = xold - grad(A, b, xold) * solve_alpha_k(A,b,xold)
while la.norm(xold - xnew) > tol:
xold = xnew
xnew = xold - grad(A, b, xold) * solve_alpha_k(A,b,xold)
return xnew
Developer: snowdj | Project: byu_macro_boot_camp | Lines: 60 | Source: GeneralDescent.py
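A small self-contained check of the routine above, assuming the module-level imports of the source file (scipy as sp, numpy as np and a linear-algebra alias la) are in place. For a square, well-conditioned A the least-squares minimizer is the exact solution, so the result should agree with numpy.linalg.solve.

import numpy as np
A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
x0 = np.zeros(2)
x = steepest_descent(A, b, x0)   # returned as a (2, 1) matrix
print(np.allclose(np.ravel(x), np.linalg.solve(A, b), atol=1e-6))   # True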
Example 4: func
def func(self, X, V):
k = self.C.TFdata.k
v1 = self.C.TFdata.v1
w1 = self.C.TFdata.w1
if k >=0:
J_coords = self.F.sysfunc.J_coords
w = sqrt(k)
q = v1 - (1j/w)*matrixmultiply(self.F.sysfunc.J_coords,v1)
p = w1 + (1j/w)*matrixmultiply(transpose(self.F.sysfunc.J_coords),w1)
p /= linalg.norm(p)
q /= linalg.norm(q)
p = reshape(p,(p.shape[0],))
q = reshape(q,(q.shape[0],))
direc = conjugate(1/matrixmultiply(transpose(conjugate(p)),q))
p = direc*p
l1 = firstlyapunov(X, self.F.sysfunc, w, J_coords=J_coords, p=p, q=q)
return array([l1])
else:
return array([1])
Developer: BenjaminBerhault | Project: Python_Classes4MAD | Lines: 26 | Source: TestFunc.py
Example 5: Au
def Au(U,GF,EpsArr,NX,NY,NZ):
"""Returns the result of matrix-vector multiplication
by the system matrix A=I-GX
"""
# reshaping input vector into 4-D array
Uarr=sci.reshape(U,(NX,NY,NZ,3))
# extended zero-padded arrays
Uext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
Vext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
Jext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
JFext=sci.zeros((2*NX,2*NY,2*NZ,3),complex)
Uext[0:NX,0:NY,0:NZ,:]=Uarr
# contrast current array
s=0
while s<=2:
Jext[0:NX,0:NY,0:NZ,s]=Uext[0:NX,0:NY,0:NZ,s]*(EpsArr[0:NX,0:NY,0:NZ]-1.0)
JFext[:,:,:,s]=fft.fftn(sci.squeeze(Jext[:,:,:,s]))
s=s+1
Vext[:,:,:,0]=Uext[:,:,:,0]-\
fft.ifftn(sci.squeeze(sci.multiply(GF[:,:,:,0,0],JFext[:,:,:,0])+\
sci.multiply(GF[:,:,:,0,1],JFext[:,:,:,1])+\
sci.multiply(GF[:,:,:,0,2],JFext[:,:,:,2])))
Vext[:,:,:,1]=Uext[:,:,:,1]-\
fft.ifftn(sci.squeeze(sci.multiply(GF[:,:,:,1,0],JFext[:,:,:,0])+\
sci.multiply(GF[:,:,:,1,1],JFext[:,:,:,1])+\
sci.multiply(GF[:,:,:,1,2],JFext[:,:,:,2])))
Vext[:,:,:,2]=Uext[:,:,:,2]-\
fft.ifftn(sci.squeeze(sci.multiply(GF[:,:,:,2,0],JFext[:,:,:,0])+\
sci.multiply(GF[:,:,:,2,1],JFext[:,:,:,1])+\
sci.multiply(GF[:,:,:,2,2],JFext[:,:,:,2])))
# reshaping output into column vector
V=sci.reshape(Vext[0:NX,0:NY,0:NZ,:],(NX*NY*NZ*3,1))
return V
Developer: the-iterator | Project: VIE | Lines: 34 | Source: matvec.py
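A sanity-check sketch under assumptions: the original module refers to sci and fft aliases, so the two imports below merely stand in for them; if the function above is pasted into the same file, these aliases satisfy its references. With the permittivity equal to 1 everywhere the contrast current vanishes, A reduces to the identity, and Au must return its input unchanged as a column vector.

import numpy as sci        # stand-in for the module's sci alias
import numpy.fft as fft    # stand-in for the module's fft alias

NX = NY = NZ = 4
U = sci.random.rand(NX * NY * NZ * 3) + 0j
GF = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3, 3), complex)  # Green's function is irrelevant here
Eps = sci.ones((NX, NY, NZ))                             # EpsArr - 1 == 0: no scatterer
V = Au(U, GF, Eps, NX, NY, NZ)
print(sci.allclose(V[:, 0], U))                          # True: A == I when there is no contrast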
Example 6: rerun_dfa
def rerun_dfa(chrom,xdata,mask,groups,names,DFs):
"""Run DFA in min app"""
#extract vars from xdata
slice = meancent(_slice(xdata,chrom))
#split in to training and test
tr_slice,cv_slice,ts_slice,tr_grp,cv_grp,ts_grp,tr_nm,cv_nm,ts_nm=_split(slice,groups,mask,names)
#get indexes
idx = scipy.arange(xdata.shape[0])[:,nA]
tr_idx = scipy.take(idx,_index(mask,0),0)
cv_idx = scipy.take(idx,_index(mask,1),0)
ts_idx = scipy.take(idx,_index(mask,2),0)
#model DFA on training samples
u,v,eigs,dummy = cva(tr_slice,tr_grp,DFs)
#project xval and test samples
projUcv = scipy.dot(cv_slice,v)
projUt = scipy.dot(ts_slice,v)
uout = scipy.zeros((xdata.shape[0],DFs),'d')
_put(uout,scipy.reshape(tr_idx,(len(tr_idx),)).tolist(),u)
_put(uout,scipy.reshape(cv_idx,(len(cv_idx),)).tolist(),projUcv)
_put(uout,scipy.reshape(ts_idx,(len(ts_idx),)).tolist(),projUt)
return uout,v,eigs
Developer: myw | Project: dataiap | Lines: 27 | Source: fitfun.py
Example 7: draw_cone
def draw_cone(event=None):
# cone radius 1
Radius1 = 30.0
# cone radius 2
Radius2 = 70.0
# cone height
Height = 90.0
# The center point at one of the flat cone faces
Point = scipy.array([-25.0, -50.0, 50.0])
Point = scipy.reshape(Point,(3,1))
# The direction of the cone from the point given above
DirectionFromPoint = scipy.array([25.0, 50.0, 150.0])
DirectionFromPoint = scipy.reshape(DirectionFromPoint,(3,1))
# create the cone object
MyCone = cone_from_point_height_directionvector_and_two_radii( \
Point,
DirectionFromPoint,
Height,
Radius1,
Radius2 )
MyConeShape = MyCone.Shape()
ais_shape_MyConeShape = AIS_Shape( MyConeShape ).GetHandle()
ais_context = display.GetContext().GetObject()
ais_context.SetMaterial( ais_shape_MyConeShape,
Graphic3d.Graphic3d_NOM_STONE )
ais_context.Display( ais_shape_MyConeShape )
Developer: NiSchultz | Project: pythonocc | Lines: 27 | Source: Step4_3.py
Example 8: condition
def condition(point,art_index):
l = len(point.param['free'])
a = len(point.param['artificial'])
neq = point.neq
dx = point.system.dx
    nx = len(point.u) // neq   # integer division (Python 3-safe)
y0 = scipy.reshape(point.u,(neq,nx))
left = scipy.zeros((neq,1),scipy.float64)
right = scipy.zeros((neq,1),scipy.float64)
left[:,0]=y0[:,0]
right[:,0]=y0[:,-1]
u=scipy.c_[left,y0,right]
deriv = 1./(2*dx)*scipy.reshape(scipy.transpose(\
u[:,2:]-u[:,:-2]),(nx*neq,))
result = {}
result['column'] = deriv
result['row'] = deriv*dx
result['d'] = scipy.zeros((l+a,),scipy.float64)
result['eq_term'] = deriv*point.lambd[art_index]
result['res'] = 0
return result
Developer: pvnuffel | Project: riskmodel | Lines: 28 | Source: artificial.py
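The deriv term above is a central difference whose end points are padded with copies of the boundary values. A minimal stand-alone sketch of the same stencil (toy numbers, one equation):

import numpy as np
dx = 0.1
y0 = np.array([[0.0, 1.0, 4.0, 9.0, 16.0]])   # one equation, nx = 5 grid points
u = np.c_[y0[:, :1], y0, y0[:, -1:]]          # replicate the end values as ghost points
deriv = (u[:, 2:] - u[:, :-2]) / (2 * dx)     # same shape as y0
print(deriv)                                  # [[ 5. 20. 40. 60. 35.]]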
Example 9: ideal_data
def ideal_data(num, dimU, dimY, dimX, noise=1):
"""Linear system data"""
# generate randomized linear system matrices
A = randn(dimX, dimX)
B = randn(dimX, dimU)
C = randn(dimY, dimX)
D = randn(dimY, dimU)
# make sure state evolution is stable
U, S, V = svd(A)
A = dot(U, dot(diag(S / max(S)), V))
U, S, V = svd(B)
S2 = zeros((size(U,1), size(V,0)))
S2[:,:size(U,1)] = diag(S / max(S))
B = dot(U, dot(S2, V))
# random input
U = randn(num, dimU)
# initial state
X = reshape(randn(dimX), (1,-1))
# initial output
Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1,-1))
# generate next state
X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1,-1))))
# and so forth
for u in U[1:]:
Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1,-1))))
X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1,-1))))
return U, Y + randn(num, dimY) * noise
Developer: riscy | Project: mllm | Lines: 34 | Source: system_identifier.py
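A usage sketch, assuming the star-style module imports of the source file (randn, svd, diag, dot, zeros, size, reshape, concatenate from NumPy/SciPy) are available. The sizes are illustrative; as written, the S2 assignment above appears to require dimU >= dimX, so the sketch respects that.

U, Y = ideal_data(200, dimU=3, dimY=2, dimX=2, noise=0.1)
print(U.shape, Y.shape)   # (200, 3) (200, 2): random inputs and noisy outputs of the simulated system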
Example 10: _generate_masked_mesh
def _generate_masked_mesh(self, cell_mask=None):
r"""
Generates the mesh based on the cell mask provided
"""
#
if cell_mask is None:
cell_mask = sp.ones(self.data_map.shape, dtype=bool)
#
# initializing arrays
self._edges = sp.ones(0, dtype=str)
self._merge_patch_pairs = sp.ones(0, dtype=str)
self._create_blocks(cell_mask)
#
# building face arrays
mapper = sp.ravel(sp.array(cell_mask, dtype=int))
mapper[mapper == 1] = sp.arange(sp.count_nonzero(mapper))
mapper = sp.reshape(mapper, (self.nz, self.nx))
mapper[~cell_mask] = -sp.iinfo(int).max
#
boundary_dict = {
'bottom':
{'bottom': mapper[0, :][cell_mask[0, :]]},
'top':
{'top': mapper[-1, :][cell_mask[-1, :]]},
'left':
{'left': mapper[:, 0][cell_mask[:, 0]]},
'right':
{'right': mapper[:, -1][cell_mask[:, -1]]},
'front':
{'front': mapper[cell_mask]},
'back':
{'back': mapper[cell_mask]},
'internal':
{'bottom': [], 'top': [], 'left': [], 'right': []}
}
#
# determining cells linked to a masked cell
cell_mask = sp.where(~sp.ravel(cell_mask))[0]
inds = sp.in1d(self._field._cell_interfaces, cell_mask)
inds = sp.reshape(inds, (len(self._field._cell_interfaces), 2))
inds = inds[:, 0].astype(int) + inds[:, 1].astype(int)
inds = (inds == 1)
links = self._field._cell_interfaces[inds]
#
# adjusting order so masked cells are all on links[:, 1]
swap = sp.in1d(links[:, 0], cell_mask)
links[swap] = links[swap, ::-1]
#
# setting side based on index difference
sides = sp.ndarray(len(links), dtype='<U6')
sides[sp.where(links[:, 1] == links[:, 0]-self.nx)[0]] = 'bottom'
sides[sp.where(links[:, 1] == links[:, 0]+self.nx)[0]] = 'top'
sides[sp.where(links[:, 1] == links[:, 0]-1)[0]] = 'left'
sides[sp.where(links[:, 1] == links[:, 0]+1)[0]] = 'right'
#
# adding each block to the internal face dictionary
inds = sp.ravel(mapper)[links[:, 0]]
for side, block_id in zip(sides, inds):
boundary_dict['internal'][side].append(block_id)
self.set_boundary_patches(boundary_dict, reset=True)
Developer: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 60 | Source: __BlockMeshDict__.py
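The index bookkeeping above hinges on the mapper trick: ravel the cell mask, replace the ones with a running block index, then mark masked cells with a large negative sentinel. A self-contained toy version in plain NumPy (values invented):

import numpy as np
cell_mask = np.array([[True, False, True],
                      [True, True, False]])
mapper = np.ravel(cell_mask).astype(int)
mapper[mapper == 1] = np.arange(np.count_nonzero(mapper))  # 0..n-1 over the kept cells
mapper = np.reshape(mapper, cell_mask.shape)
mapper[~cell_mask] = -np.iinfo(int).max                    # sentinel for masked cells
print(mapper)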
Example 11: get_introns
def get_introns(self):
_introns = sp.reshape(self.exons1.ravel()[1:-1], (self.exons1.shape[0] - 1, 2))
if len(self.exons2.shape) > 1:
_introns = sp.r_[_introns, sp.reshape(self.exons2.ravel()[1:-1], (self.exons2.shape[0] - 1, 2))]
return _introns
Developer: ratschlab | Project: spladder | Lines: 7 | Source: event.py
Example 12: quantize0
def quantize0 (image):
row,col = image.shape
vect = reshape(image,(row*col,))
    # min, scalingFact and AA come from the enclosing module (AA is presumably an apply-to-all helper)
    vect = AA(int)((vect-min)*scalingFact + 0.5)
vect = np.array(vect)
vect = vect/scalingFact + min
return reshape(vect,(row,col))
Developer: FabioMaster | Project: larpy | Lines: 7 | Source: CTimage2blocks.py
Example 13: crossValidate
def crossValidate(y, X, K=None, folds=3, model=None, returnModel=False):
errors = SP.empty(folds)
n = y.shape[0]
indexes = crossValidationScheme(folds,n)
predictions = SP.empty(y.shape)
alpha = []
alphas = []
msePath = []
for cvRun in SP.arange(len(indexes)):
testIndexes = indexes[cvRun]
yTrain = y[~testIndexes]
XTrain = X[~testIndexes]
        if K is None:
model.fit(XTrain, yTrain)
prediction = SP.reshape(model.predict(X[testIndexes]), (-1,1))
else: # models having population structure
KTrain = K[~testIndexes]
KTrain = KTrain[:,~testIndexes]
KTest=K[testIndexes]
KTest=KTest[:,~testIndexes]
model.reset()
model.kernel = KTrain #TODO: make nice integration
model.fit(XTrain, yTrain)
prediction = SP.reshape(model.predict(X[testIndexes], k=KTest), (-1,1))
predictions[testIndexes] = prediction
errors[cvRun] = predictionError(y[testIndexes], prediction)
        print('prediction error right now is', errors[cvRun])
if returnModel:
alpha.append(model.alpha)
alphas.append(model.alphas)
msePath.append(model.mse_path)
if returnModel:
return indexes, predictions, errors, alpha, alphas, msePath
else:
return indexes, predictions, errors
Developer: PMBio | Project: limix | Lines: 35 | Source: lmm_forest_utils.py
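The kernel handling above is plain boolean-mask indexing: masking the rows first and then the columns extracts the train-by-train and test-by-train blocks of K that the population-structure model needs. A minimal sketch with toy data (not limix code):

import numpy as np
K = np.arange(25.0).reshape(5, 5)            # toy 5x5 kinship/kernel matrix
testIndexes = np.array([False, True, False, False, True])
KTrain = K[~testIndexes][:, ~testIndexes]    # (3, 3): train rows x train columns
KTest = K[testIndexes][:, ~testIndexes]      # (2, 3): test rows x train columns
print(KTrain.shape, KTest.shape)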
Example 14: draw_arrow
def draw_arrow(event=None):
# Length of the Arrow
Arrowlength = 400.0
# Shaft radius
RadiusOfArrowShaft = 20.0
    # Length of the arrow head's cone
    LenghtOfArrowHead = 100.0
    # Radius of the arrow head's cone
RadiusOfArrowHead = 50.0
# The center point at one of the flat cone faces
Point = scipy.array([-50.0, -50.0, 0.0])
Point = scipy.reshape(Point,(3,1))
# The direction of the cone from the point given above
DirectionFromPoint = scipy.array([-25.0, -50.0, -150.0])
DirectionFromPoint = scipy.reshape(DirectionFromPoint,(3,1))
# create the arrow shape
# Look at the difference to the other functions and note that it is
# also possible to create the shape in a function. If we do that we
# get a shape and not the object.
MyArrowShape = arrowShape( Point,
DirectionFromPoint,
Arrowlength,
RadiusOfArrowShaft,
LenghtOfArrowHead,
RadiusOfArrowHead )
display.DisplayColoredShape( MyArrowShape , 'BLACK' )
Developer: NiSchultz | Project: pythonocc | Lines: 27 | Source: Step4_1.py
Example 15: pca_svd
def pca_svd(myarray,type='covar'):
"""Run principal components analysis (PCA) by singular
value decomposition (SVD)
>>> import scipy
>>> a = scipy.array([[1,2,3],[0,1,1.5],[-1,-6,34],[8,15,2]])
>>> a
array([[ 1. , 2. , 3. ],
[ 0. , 1. , 1.5],
[ -1. , -6. , 34. ],
[ 8. , 15. , 2. ]])
>>> # There are four samples, with three variables each
>>> tt,pp,pr,eigs = pca_svd(a)
>>> tt
array([[ 5.86463567e+00, -4.28370443e+00, 1.46798845e-01],
[ 6.65979784e+00, -6.16620433e+00, -1.25067331e-01],
[ -2.56257861e+01, 1.82610701e+00, -6.62877855e-03],
[ 1.31013526e+01, 8.62380175e+00, -1.51027354e-02]])
>>> pp
array([[ 0.15026487, 0.40643255, -0.90123973],
[ 0.46898935, 0.77318935, 0.4268808 ],
[ 0.87032721, -0.48681703, -0.07442934]])
    >>> # This is the 'rotation matrix' - you can imagine column labels
>>> # of PC1, PC2, PC3 and row labels of variable1, variable2, variable3.
>>> pr
array([[ 0. ],
[ 97.1073744 ],
[ 98.88788958],
[ 99.98141011]])
>>> eigs
array([[ 30.11765617],
[ 11.57915467],
[ 0.1935556 ]])
>>> a
array([[ 1. , 2. , 3. ],
[ 0. , 1. , 1.5],
[ -1. , -6. , 34. ],
[ 8. , 15. , 2. ]])
"""
if type=='covar':
myarray = _meancent(myarray)
elif type=='corr':
myarray = _autoscale(myarray)
else:
        raise KeyError("'type' must be one of 'covar' or 'corr'")
# I think this may run faster if myarray is converted to a matrix first.
# (This should be tested - anyone got a large dataset?)
# mymat = scipy.mat(myarray)
u,s,v = scipy.linalg.svd(myarray)
tt = scipy.dot(myarray,scipy.transpose(v))
pp = v
pr = (1-(s/scipy.sum(scipy.sum(myarray**2))))*100
pr = scipy.reshape(pr,(1,len(pr)))
pr = scipy.concatenate((scipy.array([[0.0]]),pr),1)
pr = scipy.reshape(pr,(pr.shape[1],))
eigs = s
return tt,pp,pr[:,nA],eigs[:,nA]
Developer: jikhanjung | Project: modan | Lines: 59 | Source: chemometrics.py
Example 16: __init__
def __init__(self, U, Y, statedim, reg=None):
if size(shape(U)) == 1:
U = reshape(U, (-1,1))
if size(shape(Y)) == 1:
Y = reshape(Y, (-1,1))
if reg is None:
reg = 0
yDim = size(Y,1)
uDim = size(U,1)
self.output_size = size(Y,1) # placeholder
# number of samples of past/future we'll mash together into a 'state'
width = 1
# total number of past/future pairings we get as a result
K = size(U,0) - 2 * width + 1
# build hankel matrices containing pasts and futures
U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T
# solve the eigenvalue problem
YfUfT = dot(Y_f, U_f.T)
YfUpT = dot(Y_f, U_p.T)
YfYpT = dot(Y_f, Y_p.T)
UfUpT = dot(U_f, U_p.T)
UfYpT = dot(U_f, Y_p.T)
UpYpT = dot(U_p, Y_p.T)
F = bmat([[None, YfUfT, YfUpT, YfYpT],
[YfUfT.T, None, UfUpT, UfYpT],
[YfUpT.T, UfUpT.T, None, UpYpT],
[YfYpT.T, UfYpT.T, UpYpT.T, None]])
Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
[None, pinv(dot(U_f,U_f.T)), None, None],
[None, None, pinv(dot(U_p,U_p.T)), None],
[None, None, None, pinv(dot(Y_p,Y_p.T))]])
F = F - eye(size(F, 0)) * reg
# Take smallest eigenvalues
_, W = eigs(Ginv.dot(F), k=statedim, which='SR')
# State sequence is a weighted combination of the past
W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
W_Y_p = W[ width * (yDim + uDim + uDim):, :]
X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)
# Regress; trim inputs to match the states we retrieved
R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
RRi = pinv(dot(R, R.T))
RL = dot(R, L.T)
Sys = dot(RRi, RL).T
self.A = Sys[:statedim, :statedim]
self.B = Sys[:statedim, statedim:]
self.C = Sys[statedim:, :statedim]
self.D = Sys[statedim:, statedim:]
Developer: riscy | Project: mllm | Lines: 59 | Source: system_identifier.py
Example 17: _forwardImplementation
def _forwardImplementation(self, inbuf, outbuf):
par = reshape(self.params, (3, self.outdim))
inn = reshape(inbuf, (self.dx, self.dy))
self.out = numpy.zeros((self.outdim, self.dx, self.dy))
        for k in range(len(outbuf)):
kernel = ((self.xx - par[0][k]) ** 2 + (self.yy - par[1][k]) ** 2) / (2 * par[2][k] ** 2)
self.out[k] = numpy.multiply(inn, pybrain.tools.functions.safeExp(-kernel))
outbuf[k] += numpy.sum(self.out[k])
Developer: ioam | Project: svn-history | Lines: 8 | Source: pybrainstuff.py
Example 18: write_gmm_data_file_depth
def write_gmm_data_file_depth(
model_name, mag, dist, depth, result_type, periods, file_out, component_type="AVERAGE_HORIZONTAL"
):
"""
    Create a file of input and output parameters for the Somerville GMM.
params:
model_name: The ground motion model, as a string.
mag: dictionary, key - the mag column name, values, the mag vectors,
as a list
dist: dictionary, key - the distance column name, value,
the distance vectors, as a list.
depth: depth in km.
result_type: MEAN or TOTAL_STDDEV
periods: A list of periods requiring SA values.
The first value has to be 0.0.
Mag, distance and periods will be iterated over to give a single SA for
each combination.
file_out: The file name and location of the produced data file.
"""
assert periods[0] == 0.0
handle = open(file_out, "wb")
writer = csv.writer(handle, delimiter=",", quoting=csv.QUOTE_NONE)
# write title
title = [depth[0], mag[0], dist[0], "result_type", "component_type"] + periods[1:] + ["pga"]
writer.writerow(title)
# prepare the coefficients
model = Ground_motion_specification(model_name)
coeff = model.calc_coefficient(periods)
coeff = reshape(coeff, (coeff.shape[0], 1, 1, coeff.shape[1]))
sigma_coeff = model.calc_sigma_coefficient(periods)
sigma_coeff = reshape(sigma_coeff, (sigma_coeff.shape[0], 1, 1, sigma_coeff.shape[1]))
# Iterate
for depi in depth[1]:
for magi in mag[1]:
for disti in dist[1]:
dist_args = {
"mag": array([[[magi]]]),
dist[0]: array([[[disti]]]),
"depth": array([[[depi]]]),
"coefficient": coeff,
"sigma_coefficient": sigma_coeff,
}
log_mean, log_sigma = model.distribution(**dist_args)
sa_mod = list(log_mean.reshape(-1))
sa_mod = [math.exp(x) for x in sa_mod]
sigma_mod = list(log_sigma.reshape(-1))
if result_type == "MEAN":
row = [depi, magi, disti, result_type, component_type] + sa_mod[1:] + [sa_mod[0]]
else:
row = [depi, magi, disti, result_type, component_type] + sigma_mod[1:] + [sigma_mod[0]]
writer.writerow(row)
handle.close()
Developer: vipkolon | Project: eqrm | Lines: 58 | Source: create_gmm_data_4_nhlib.py
Example 19: segmented
def segmented():
radius = 5
sigmaI = 0.02
sigmaX = 3.0
height = img.shape[0]
width = img.shape[1]
flatImg = img.flatten()
    darkImg = flatImg.copy()     # use copies so zeroing one image does not affect the others
    brightImg = flatImg.copy()
nodes = img.flatten()
W = spar.lil_matrix((nodes.size, nodes.size),dtype=float)
D = sp.zeros((1,nodes.size))
for row in range(height):
for col in range(width):
for k in range(row-radius,row+radius):
for l in range(col-radius,col+radius):
try:
w = weight(row,col,k,l)
W[row*width+col,k*width+l] = w
D[0,row*width+col] += w
except:
continue
D = spar.spdiags(D, 0, nodes.size, nodes.size)
Q = D - W
D1 = D.todense()
Q1 = Q.todense()
diags = sp.diag(D1)
DminusHalf = sp.diag(diags**-0.5)
segQ = sp.dot(sp.dot(DminusHalf, Q1),DminusHalf)
vals, vecs = la.eig(segQ)
vecind = sp.argsort(vals)[1]
    theVec = vecs[:, vecind]   # eigenvectors are the columns returned by la.eig
for i in range(0,height**2):
if theVec[i] < 0:
darkImg[i] = 0.0
else:
brightImg[i] = 0.0
darkImg = sp.reshape(darkImg, (height,height))
brightImg = sp.reshape(brightImg, (height,height))
return darkImg, flatImg, brightImg
Developer: snowdj | Project: byu_macro_boot_camp | Lines: 58 | Source: Lab16b.py
Example 20: _provide
def _provide(self):
self.stochfun._newSample(self.paramdim*self.batch_size, override=True)
if self.record_samples:
ls = self.stochfun._lastseen
if self.batch_size == 1:
self._seen.append(ls)
else:
for l in reshape(ls, (self.batch_size, self.paramdim)):
self._seen.append(reshape(l, (1, self.paramdim)))
Developer: bitfort | Project: py-optim | Lines: 9 | Source: datainterface.py
Note: the scipy.reshape examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.