This article collects typical usage examples of Python's numpy.asfortranarray function: what the function is for, how to call it, and how it is used in real code. Twenty code examples are shown below, drawn from open-source projects and ordered roughly by popularity.
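Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what numpy.asfortranarray does: it returns an array with the same values in column-major (Fortran) order, copying only when the input is not already F-contiguous. That layout is typically required before handing data to Fortran-, LAPACK-, or C-backed routines, which is how most of the examples below use it.

import numpy as np

# A freshly created 2-D array is C-contiguous (row-major) by default.
c_arr = np.arange(12, dtype=np.float64).reshape(3, 4)
print(c_arr.flags['C_CONTIGUOUS'], c_arr.flags['F_CONTIGUOUS'])  # True False

# asfortranarray returns the same values in column-major (Fortran) layout.
f_arr = np.asfortranarray(c_arr)
print(f_arr.flags['F_CONTIGUOUS'])        # True
print(np.array_equal(c_arr, f_arr))       # True -- only the memory layout differs

# If the input is already F-contiguous and the dtype matches, no copy is made.
print(np.asfortranarray(f_arr) is f_arr)  # True

Note that a 1-D array is reported as both C- and F-contiguous, so asfortranarray leaves it untouched; the distinction only matters for arrays with two or more dimensions.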
Example 1: adjoint
def adjoint(self, inputs, outputs):
    """The adjoint operator.
    Reads from inputs and writes to outputs.
    """
    if self.implementation == Impl['halide']:
        # Halide implementation
        if len(self.H.shape) == 2:
            tmpin = np.asfortranarray(inputs[0][..., np.newaxis].astype(np.float32))
        else:
            tmpin = np.asfortranarray(inputs[0].astype(np.float32))
        Halide('At_warp.cpp').At_warp(tmpin, self.Hf, self.tmpadj)  # Call
        np.copyto(outputs[0], self.tmpadj)
    else:
        # CV2 version
        inimg = inputs[0]
        if len(self.H.shape) == 2:
            # + cv2.WARP_INVERSE_MAP
            warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.Hinv.T, inimg.shape[1::-1],
                                              flags=cv2.INTER_LINEAR,
                                              borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            np.copyto(outputs[0], warpedInput)
        else:
            outputs[0][:] = 0.0
            for j in range(self.H.shape[2]):
                # Necessary due to array layout in opencv
                warpedInput = cv2.warpPerspective(np.asfortranarray(inimg[:, :, :, j]), self.Hinv[:, :, j].T,
                                                  inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                                  borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
                outputs[0] += warpedInput
Developer: spillai | Project: ProxImaL | Lines of code: 34 | Source file: warp.py
Example 2: test_bind
def test_bind():
    mod = Representation(1, k_states=2)

    # Test invalid endogenous array (it must be ndarray)
    assert_raises(ValueError, lambda: mod.bind([1, 2, 3, 4]))
    # Test valid (nobs x 1) endogenous array
    mod.bind(np.arange(10) * 1.)
    assert_equal(mod.nobs, 10)
    # Test valid (k_endog x 0) endogenous array
    mod.bind(np.zeros(0, dtype=np.float64))
    # Test invalid (3-dim) endogenous array
    assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2, 2, 3) * 1.))
    # Test valid F-contiguous
    mod.bind(np.asfortranarray(np.arange(10).reshape(1, 10)))
    assert_equal(mod.nobs, 10)
    # Test valid C-contiguous
    mod.bind(np.arange(10).reshape(10, 1))
    assert_equal(mod.nobs, 10)
    # Test invalid F-contiguous
    assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10, 1))))
    # Test invalid C-contiguous
    assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1, 10)))
Developer: andreas-koukorinis | Project: statsmodels | Lines of code: 29 | Source file: test_representation.py
Example 3: test_mem_layout
def test_mem_layout():
    # Test with different memory layouts of X and y
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
Developer: amueller | Project: scikit-learn | Lines of code: 27 | Source file: test_gradient_boosting.py
Example 4: psiDerivativecomputations
def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
    ARD = (len(lengthscale) != 1)
    N, M, Q = self.get_dimensions(Z, variational_posterior)
    psi1_gpu = self.gpuCache['psi1_gpu']
    psi2n_gpu = self.gpuCache['psi2n_gpu']
    l_gpu = self.gpuCache['l_gpu']
    Z_gpu = self.gpuCache['Z_gpu']
    mu_gpu = self.gpuCache['mu_gpu']
    S_gpu = self.gpuCache['S_gpu']
    gamma_gpu = self.gpuCache['gamma_gpu']
    dvar_gpu = self.gpuCache['dvar_gpu']
    dl_gpu = self.gpuCache['dl_gpu']
    dZ_gpu = self.gpuCache['dZ_gpu']
    dmu_gpu = self.gpuCache['dmu_gpu']
    dS_gpu = self.gpuCache['dS_gpu']
    dgamma_gpu = self.gpuCache['dgamma_gpu']
    grad_l_gpu = self.gpuCache['grad_l_gpu']
    grad_mu_gpu = self.gpuCache['grad_mu_gpu']
    grad_S_gpu = self.gpuCache['grad_S_gpu']
    grad_gamma_gpu = self.gpuCache['grad_gamma_gpu']
    log_denom1_gpu = self.gpuCache['log_denom1_gpu']
    log_denom2_gpu = self.gpuCache['log_denom2_gpu']
    log_gamma_gpu = self.gpuCache['log_gamma_gpu']
    log_gamma1_gpu = self.gpuCache['log_gamma1_gpu']

    if self.GPU_direct:
        dL_dpsi1_gpu = dL_dpsi1
        dL_dpsi2_gpu = dL_dpsi2
        dL_dpsi0_sum = gpuarray.sum(dL_dpsi0).get()
    else:
        dL_dpsi1_gpu = self.gpuCache['dL_dpsi1_gpu']
        dL_dpsi2_gpu = self.gpuCache['dL_dpsi2_gpu']
        dL_dpsi1_gpu.set(np.asfortranarray(dL_dpsi1))
        dL_dpsi2_gpu.set(np.asfortranarray(dL_dpsi2))
        dL_dpsi0_sum = dL_dpsi0.sum()

    self.reset_derivative()
    # t=self.g_psi1compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi1_gpu,psi1_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
    # print 'g_psi1compDer '+str(t)
    # t=self.g_psi2compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi2_gpu,psi2n_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
    # print 'g_psi2compDer '+str(t)
    self.g_psi1compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi1_gpu.gpudata,psi1_gpu.gpudata, log_denom1_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))
    self.g_psi2compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi2_gpu.gpudata,psi2n_gpu.gpudata, log_denom2_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))

    dL_dvar = dL_dpsi0_sum + gpuarray.sum(dvar_gpu).get()
    sum_axis(grad_mu_gpu, dmu_gpu, N*Q, self.blocknum)
    dL_dmu = grad_mu_gpu.get()
    sum_axis(grad_S_gpu, dS_gpu, N*Q, self.blocknum)
    dL_dS = grad_S_gpu.get()
    sum_axis(grad_gamma_gpu, dgamma_gpu, N*Q, self.blocknum)
    dL_dgamma = grad_gamma_gpu.get()
    dL_dZ = dZ_gpu.get()
    if ARD:
        sum_axis(grad_l_gpu, dl_gpu, Q, self.blocknum)
        dL_dlengscale = grad_l_gpu.get()
    else:
        dL_dlengscale = gpuarray.sum(dl_gpu).get()

    return dL_dvar, dL_dlengscale, dL_dZ, dL_dmu, dL_dS, dL_dgamma
Developer: Arthurkorn | Project: GPy | Lines of code: 60 | Source file: ssrbf_psi_gpucomp.py
Example 5: test_nmf
def test_nmf():
    img_file = 'boat.png'
    try:
        img = Image.open(img_file)
    except:
        print("Cannot load image %s : skipping test" % img_file)
        return None
    I = np.array(img) / 255.
    if I.ndim == 3:
        A = np.asfortranarray(I.reshape((I.shape[0], I.shape[1] * I.shape[2])), dtype=myfloat)
        rgb = True
    else:
        A = np.asfortranarray(I, dtype=myfloat)
        rgb = False

    m = 16; n = 16
    X = spams.im2col_sliding(A, m, n, rgb)
    X = X[:, ::10]
    X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)), (X.shape[0], 1)), dtype=myfloat)

    ########## FIRST EXPERIMENT ###########
    tic = time.time()
    (U, V) = spams.nmf(X, return_lasso=True, K=49, numThreads=4, iter=-5)
    tac = time.time()
    t = tac - tic
    print('time of computation for Dictionary Learning: %f' % t)

    print('Evaluating cost function...')
    Y = X - U * V
    R = np.mean(0.5 * (Y * Y).sum(axis=0))
    print('objective function: %f' % R)
    return None
Developer: samuelstjean | Project: spams-python | Lines of code: 31 | Source file: test_dictLearn.py
Example 6: test_mul
def test_mul():
    ## Test multiply method of a distributed matrix
    ms, ns = 5, 14

    gA = np.random.standard_normal((ms, ns)).astype(np.float64)
    gA = np.asfortranarray(gA)
    dA = core.DistributedMatrix.from_global_array(gA, rank=0)

    gB = np.random.standard_normal((ms, ns)).astype(np.float64)
    gB = np.asfortranarray(gB)
    dB = core.DistributedMatrix.from_global_array(gB, rank=0)

    dC = dA * dB
    gC = dC.to_global_array(rank=0)

    a = np.random.standard_normal(ns).astype(np.float64)
    comm.Bcast(a, root=0)  # ensure all process have the same data
    dD = dA * a
    gD = dD.to_global_array(rank=0)

    alpha = 2.345
    dE = dA * alpha
    gE = dE.to_global_array(rank=0)

    if rank == 0:
        assert allclose(gA * gB, gC)
        assert allclose(gA * a, gD)
        assert allclose(gA * alpha, gE)
Developer: jrs65 | Project: scalapy | Lines of code: 28 | Source file: test_multiply.py
Example 7: subset_selection_xtx
def subset_selection_xtx(X, Y):
    """ Subsets selection using EvalSubsetsUsingXtx in the Earth package.
    """
    X = numpy.asfortranarray(X, dtype=ctypes.c_double)
    Y = numpy.asfortranarray(Y, dtype=ctypes.c_double)
    if Y.ndim == 1:
        Y = Y.reshape((-1, 1), order="F")

    if X.shape[0] != Y.shape[0]:
        raise ValueError("First dimensions of bx and y must be the same")

    var_count = X.shape[1]
    resp_count = Y.shape[1]
    cases = X.shape[0]
    subsets = numpy.zeros((var_count, var_count), dtype=ctypes.c_bool,
                          order="F")
    rss_vec = numpy.zeros((var_count,), dtype=ctypes.c_double, order="F")
    weights = numpy.ones((cases,), dtype=ctypes.c_double, order="F")

    rval = _c_eval_subsets_xtx(subsets, rss_vec, cases, resp_count, var_count,
                               X, Y, weights)
    if rval == 1:
        raise numpy.linalg.LinAlgError("Lin. dep. terms in X")
    elif rval == 2:
        raise Exception("Trying to prune the intercept.")
    elif rval != 0:
        raise Exception("Error %i" % rval)

    subsets_ind = numpy.zeros((var_count, var_count), dtype=int)
    for i, used in enumerate(subsets.T):
        subsets_ind[i, :i + 1] = numpy.where(used)[0]

    return subsets_ind, rss_vec
Developer: pauloortins | Project: Computer-Vision-Classes---UFBA | Lines of code: 33 | Source file: earth.py
Example 8: test_mask_halide
def test_mask_halide(self):
    """Test mask lin op in halide.
    """
    if halide_installed():
        # Load image
        testimg_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                        'data', 'angela.jpg')
        # opens the file using Pillow - it's not an array yet
        img = Image.open(testimg_filename)
        np_img = np.asfortranarray(im2nparray(img))

        # Test problem
        output = np.zeros_like(np_img)
        mask = np.asfortranarray(np.random.randn(*list(np_img.shape)).astype(np.float32))
        mask = np.maximum(mask, 0.)

        Halide('A_mask.cpp').A_mask(np_img, mask, output)  # Call
        output_ref = mask * np_img

        # Transpose
        output_trans = np.zeros_like(np_img)
        Halide('At_mask.cpp').At_mask(np_img, mask, output_trans)  # Call

        self.assertItemsAlmostEqual(output, output_ref)
        self.assertItemsAlmostEqual(output_trans, output_ref)
Developer: comp-imaging | Project: ProxImaL | Lines of code: 25 | Source file: test_lin_ops.py
Example 9: test_matrix_multiply_ff
def test_matrix_multiply_ff(self):
    """matrix multiply two FORTRAN layout matrices"""
    a = np.asfortranarray(np.random.randn(M, N))
    b = np.asfortranarray(np.random.randn(N, K))
    res = gulinalg.matrix_multiply(a, b)
    ref = np.dot(a, b)
    assert_allclose(res, ref)
Developer: ContinuumIO | Project: gulinalg | Lines of code: 7 | Source file: test_matrix_multiply.py
Example 10: runRandomWalking
def runRandomWalking(self, w, nAlgs, nWalks=1, nIters=-1, nErrorsLimit=-1, allowSimilar=False, pTransition=0.8,
                     randomSeed=0):
    RunRandomWalkingResult = collections.namedtuple('RunRandomWalkingResult', 'W isSource')
    nFeatures = w.shape[0]
    w0 = np.tile(w, (nWalks, 1))

    sessionStats = self.getStats()
    if sessionStats.nFeatures != nFeatures:
        raise Exception('sessionStats.nFeatures != w0.shape[1]')

    W = np.asfortranarray(np.zeros((nAlgs, nFeatures)).astype(np.float32))
    isSource = np.asfortranarray(np.zeros((nAlgs, 1)).astype(np.uint8))

    w0_p = w0.ctypes.data_as(self.lsPlugin.c_float_p)
    W_p = W.ctypes.data_as(self.lsPlugin.c_float_p)
    isSource_p = isSource.ctypes.data_as(self.lsPlugin.c_uint8_p)
    pTransition_p = (ctypes.c_float * 1)()
    pTransition_p[0] = pTransition
    # pTransition.ctypes.data_as(self.c_float_p)

    nAlgs = self.lsPlugin.dll.runRandomWalking(self.sessionId, w0_p, nWalks, nAlgs, nIters, nErrorsLimit,
                                               allowSimilar, pTransition_p, randomSeed, W_p, isSource_p)
    self.lsPlugin.verifyCall(nAlgs)

    return RunRandomWalkingResult(W, isSource)
Developer: sashafrey | Project: latex | Lines of code: 25 | Source file: api.py
Example 11: setup
def setup(self):
    self.p = numpy.array([[27, 51],
                          [66, 85],
                          [77, 45]])
    self.p3 = numpy.array([[27, 51, 37],
                           [66, 85, 25],
                           [77, 45, 73]])

    self.space = numpy.array((100, 100))
    self.space3 = numpy.array((100, 100, 100))
    self.radii = numpy.array((5, 6, 7))

    self.g = nanshe.syn.data.generate_hypersphere_masks(
        self.space, self.p, self.radii
    )
    self.g = self.g.reshape((self.g.shape[0], -1))
    self.g = self.g.transpose()
    self.g = numpy.asmatrix(self.g)
    self.g = numpy.asfortranarray(self.g)

    self.g3 = nanshe.syn.data.generate_hypersphere_masks(
        self.space3, self.p3, self.radii
    )
    self.g3 = self.g3.reshape((self.g3.shape[0], -1))
    self.g3 = self.g3.transpose()
    self.g3 = numpy.asmatrix(self.g3)
    self.g3 = numpy.asfortranarray(self.g3)
Developer: gitter-badger | Project: nanshe | Lines of code: 30 | Source file: test_spams_sandbox.py
Example 12: endog
def endog(self, value):
    self._endog = np.array(value, order='A')
    # (T x M)
    if (self.nobs, self.k_endog) == self._endog.shape:
        self._endog = self._endog.T
    # (M x T)
    elif (self.k_endog, self.nobs) == self._endog.shape:
        pass
    else:
        raise ValueError('Invalid endogenous array shape. Required'
                         '(%d, %d) or (%d, %d). Got %s'
                         % (self.nobs, self.k_endog, self.k_endog,
                            self.nobs, str(self._endog.shape)))
    if not self._endog.flags['F_CONTIGUOUS']:
        self._endog = np.asfortranarray(self._endog)

    # Create a new lag matrix, shaped (k_ar, nobs) = (k_ar, T)
    self._lagged = np.asfortranarray(np.hstack([
        self.endog[:, self.order - i:-i].T
        for i in range(1, self.order + 1)
    ]).T)

    # Set calculation flags
    self._recalculate = True
Developer: dismalpy | Project: dismalpy | Lines of code: 26 | Source file: var.py
Example 13: _set_params
def _set_params(self, p):
    new_kern_params = p[:self.kern.num_params_transformed()]
    new_likelihood_params = p[self.kern.num_params_transformed():]
    old_likelihood_params = self.likelihood._get_params()

    self.kern._set_params_transformed(new_kern_params)
    self.likelihood._set_params_transformed(new_likelihood_params)
    self.K = self.kern.K(self.X)

    # Re-fit likelihood approximation (if it is an approx), as parameters have changed
    if isinstance(self.likelihood, Laplace):
        self.likelihood.fit_full(self.K)

    self.K += self.likelihood.covariance_matrix
    self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)

    # the gradient of the likelihood wrt the covariance matrix
    if self.likelihood.YYT is None:
        # alpha = np.dot(self.Ki, self.likelihood.Y)
        alpha, _ = dpotrs(self.L, self.likelihood.Y, lower=1)
        self.dL_dK = 0.5 * (tdot(alpha) - self.output_dim * self.Ki)
    else:
        # tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
        tmp, _ = dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
        tmp, _ = dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
        self.dL_dK = 0.5 * (tmp - self.output_dim * self.Ki)

    # Adding dZ_dK (0 for a non-approximate likelihood, compensates for
    # additional gradients of K when log-likelihood has non-zero Z term)
    self.dL_dK += self.likelihood.dZ_dK
Developer: Dalar | Project: GPy | Lines of code: 33 | Source file: gp.py
Example 14: test_conjGrad
def test_conjGrad():
    A = np.asfortranarray(np.random.normal(size=(5000, 500)))
    #* np.random.seed(0)
    #* A = np.asfortranarray(np.random.normal(size = (10,5)))
    A = np.asfortranarray(np.dot(A.T, A), dtype=myfloat)
    b = np.ones((A.shape[1],), dtype=myfloat, order="FORTRAN")
    x0 = b
    tol = 1e-4
    itermax = int(0.5 * len(b))

    tic = time.time()
    for i in xrange(0, 20):
        y1 = np.linalg.solve(A, b)
    tac = time.time()
    print " Time (numpy): ", tac - tic
    x1 = np.abs(b - np.dot(A, y1))
    print "Mean error on b : %f" % (x1.sum() / b.shape[0])

    tic = time.time()
    for i in xrange(0, 20):
        y2 = spams.conjGrad(A, b, x0, tol, itermax)
        #* y2 = spams.conjGrad(A,b)
    tac = time.time()
    print " Time (spams): ", tac - tic
    x1 = np.dot(A, y2)
    x2 = np.abs(b - x1)
    print "Mean error on b : %f" % (x2.sum() / b.shape[0])

    err = abs(y1 - y2)
    return err.max()
Developer: SemanticMD | Project: spams-python | Lines of code: 30 | Source file: test_linalg.py
Example 15: test_cd
def test_cd():
    np.random.seed(0)
    X = np.asfortranarray(np.random.normal(size=(64, 100)))
    X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)), (X.shape[0], 1)), dtype=myfloat)
    D = np.asfortranarray(np.random.normal(size=(64, 100)))
    D = np.asfortranarray(D / np.tile(np.sqrt((D * D).sum(axis=0)), (D.shape[0], 1)), dtype=myfloat)

    # parameters of the optimization procedure are chosen
    lambda1 = 0.015
    mode = spams.PENALTY

    tic = time.time()
    alpha = spams.lasso(X, D, lambda1=lambda1, mode=mode, numThreads=4)
    tac = time.time()
    t = tac - tic
    xd = X - D * alpha
    E = np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print("%f signals processed per second for LARS" % (X.shape[1] / t))
    print('Objective function for LARS: %g' % E)

    tol = 0.001
    itermax = 1000
    tic = time.time()
    # A0 = ssp.csc_matrix(np.empty((alpha.shape[0],alpha.shape[1])))
    A0 = ssp.csc_matrix((alpha.shape[0], alpha.shape[1]), dtype=myfloat)
    alpha2 = spams.cd(X, D, A0, lambda1=lambda1, mode=mode, tol=tol, itermax=itermax, numThreads=4)
    tac = time.time()
    t = tac - tic
    print("%f signals processed per second for CD" % (X.shape[1] / t))
    xd = X - D * alpha2
    E = np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print('Objective function for CD: %g' % E)
    print('With Random Design, CD can be much faster than LARS')
    return None
Developer: samuelstjean | Project: spams-python | Lines of code: 32 | Source file: test_decomp.py
Example 16: forward
def forward(self, inputs, outputs):
    """The forward operator.
    Reads from inputs and writes to outputs.
    """
    if self.implementation == Impl['halide']:
        # Halide implementation
        tmpin = np.asfortranarray(inputs[0].astype(np.float32))
        Halide('A_warp.cpp').A_warp(tmpin, self.Hinvf, self.tmpfwd)  # Call
        np.copyto(outputs[0], np.reshape(self.tmpfwd, self.shape))
    else:
        # CV2 version
        inimg = inputs[0]
        if len(self.H.shape) == 2:
            warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.H.T,
                                              inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                              borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            # Necessary due to array layout in opencv
            np.copyto(outputs[0], warpedInput)
        else:
            for j in range(self.H.shape[2]):
                warpedInput = cv2.warpPerspective(np.asfortranarray(inimg),
                                                  self.H[:, :, j].T, inimg.shape[1::-1],
                                                  flags=cv2.INTER_LINEAR,
                                                  borderMode=cv2.BORDER_CONSTANT,
                                                  borderValue=0.)
                # Necessary due to array layout in opencv
                np.copyto(outputs[0][:, :, :, j], warpedInput)
Developer: PeterZs | Project: ProxImaL | Lines of code: 34 | Source file: warp.py
Example 17: _computations
def _computations(self, do_Kmm=True, do_Kmm_grad=True):
    """
    All of the computations needed. Some are optional, see kwargs.
    """
    if do_Kmm:
        self.Lm = jitchol(self.Kmm)

    # The rather complex computations of self.A
    if self.has_uncertain_inputs:
        if self.likelihood.is_heteroscedastic:
            psi2_beta = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.batchsize, 1, 1))).sum(0)
        else:
            psi2_beta = self.psi2.sum(0) * self.likelihood.precision
        evals, evecs = np.linalg.eigh(psi2_beta)
        clipped_evals = np.clip(evals, 0., 1e6)  # TODO: make clipping configurable
        tmp = evecs * np.sqrt(clipped_evals)
    else:
        if self.likelihood.is_heteroscedastic:
            tmp = self.psi1.T * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.batchsize)))
        else:
            tmp = self.psi1.T * (np.sqrt(self.likelihood.precision))
    tmp, _ = dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
    self.A = tdot(tmp)

    self.V = self.likelihood.precision * self.likelihood.Y
    self.VmT = np.dot(self.V, self.q_u_expectation[0].T)
    self.psi1V = np.dot(self.psi1.T, self.V)
    self.B = np.eye(self.num_inducing) * self.data_prop + self.A
    self.Lambda = backsub_both_sides(self.Lm, self.B.T)
    self.LQL = backsub_both_sides(self.Lm, self.q_u_expectation[1].T, transpose='right')
    self.trace_K = self.psi0.sum() - np.trace(self.A) / self.likelihood.precision
    self.Kmmi_m, _ = dpotrs(self.Lm, self.q_u_expectation[0], lower=1)
    self.projected_mean = np.dot(self.psi1, self.Kmmi_m)

    # Compute dL_dpsi
    self.dL_dpsi0 = -0.5 * self.output_dim * self.likelihood.precision * np.ones(self.batchsize)
    self.dL_dpsi1, _ = dpotrs(self.Lm, np.asfortranarray(self.VmT.T), lower=1)
    self.dL_dpsi1 = self.dL_dpsi1.T
    dL_dpsi2 = -0.5 * self.likelihood.precision * backsub_both_sides(self.Lm, self.LQL - self.output_dim * np.eye(self.num_inducing))
    if self.has_uncertain_inputs:
        self.dL_dpsi2 = np.repeat(dL_dpsi2[None, :, :], self.batchsize, axis=0)
    else:
        self.dL_dpsi1 += 2. * np.dot(dL_dpsi2, self.psi1.T).T
        self.dL_dpsi2 = None

    # Compute dL_dKmm
    if do_Kmm_grad:
        tmp = np.dot(self.LQL, self.A) - backsub_both_sides(self.Lm, np.dot(self.q_u_expectation[0], self.psi1V.T), transpose='right')
        tmp += tmp.T
        tmp += -self.output_dim * self.B
        tmp += self.data_prop * self.LQL
        self.dL_dKmm = 0.5 * backsub_both_sides(self.Lm, tmp)

    # Compute the gradient of the log likelihood wrt noise variance
    self.partial_for_likelihood = -0.5 * (self.batchsize * self.output_dim - np.sum(self.A * self.LQL)) * self.likelihood.precision
    self.partial_for_likelihood += (0.5 * self.output_dim * self.trace_K + 0.5 * self.likelihood.trYYT - np.sum(self.likelihood.Y * self.projected_mean)) * self.likelihood.precision**2
Developer: Dalar | Project: GPy | Lines of code: 60 | Source file: svigp.py
Example 18: dictEval
def dictEval(X, D, param, lam=None, dsfactor=None, patchSize=None, patchFnGrp=None, kind='avg'):
    if dsfactor is not None:
        X_useme, dsz = downsamplePatchList(X, patchSize, dsfactor, kind=kind)
        D_useme, Ddsz = downsamplePatchList(D, patchSize, dsfactor, kind=kind)
        if patchFnGrp:
            patchFnGrp.create_dataset('patchesDown', data=X_useme)
    else:
        X_useme = X
        D_useme = D

    if lam is None:
        lam = param['lambda1']

    alpha = spams.lasso(np.asfortranarray(X_useme), D=np.asfortranarray(D_useme), **param)
    Xre = (D * alpha)
    if patchFnGrp:
        patchFnGrp.create_dataset('patchesRecon', data=Xre)

    xd = X - Xre
    R = np.mean((xd * xd).sum(axis=0))
    if lam > 0:
        print " dictEval - lambda: ", lam
        R = R + lam * np.mean(np.abs(alpha).sum(axis=0))

    return R
Developer: hanslovsky | Project: dictionary-feature-classification | Lines of code: 29 | Source file: evaluation.py
Example 19: actionAngleStaeckel_calcu0
def actionAngleStaeckel_calcu0(E, Lz, pot, delta):
    """
    NAME:
       actionAngleStaeckel_calcu0
    PURPOSE:
       Use C to calculate u0 in the Staeckel approximation
    INPUT:
       E, Lz - energy and angular momentum
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
    OUTPUT:
       (u0, err)
       u0 : array, shape (len(E))
       err - non-zero if error occured
    HISTORY:
       2012-12-03 - Written - Bovy (IAS)
    """
    # Parse the potential
    npot, pot_type, pot_args = _parse_pot(pot, potforactions=True)

    # Set up result arrays
    u0 = numpy.empty(len(E))
    err = ctypes.c_int(0)

    # Set up the C code
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleStaeckel_actionsFunc = _lib.calcu0
    actionAngleStaeckel_actionsFunc.argtypes = [ctypes.c_int,
                                                ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
                                                ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
                                                ctypes.c_int,
                                                ndpointer(dtype=numpy.int32, flags=ndarrayFlags),
                                                ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
                                                ctypes.c_double,
                                                ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
                                                ctypes.POINTER(ctypes.c_int)]

    # Array requirements, first store old order
    f_cont = [E.flags['F_CONTIGUOUS'],
              Lz.flags['F_CONTIGUOUS']]
    E = numpy.require(E, dtype=numpy.float64, requirements=['C', 'W'])
    Lz = numpy.require(Lz, dtype=numpy.float64, requirements=['C', 'W'])
    u0 = numpy.require(u0, dtype=numpy.float64, requirements=['C', 'W'])

    # Run the C code
    actionAngleStaeckel_actionsFunc(len(E),
                                    E,
                                    Lz,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_double(delta),
                                    u0,
                                    ctypes.byref(err))

    # Reset input arrays
    if f_cont[0]: E = numpy.asfortranarray(E)
    if f_cont[1]: Lz = numpy.asfortranarray(Lz)

    return (u0, err.value)
Developer: jls713 | Project: galpy | Lines of code: 60 | Source file: actionAngleStaeckel_c.py
Example 20: test_singular_a
def test_singular_a(self):
    for b in [self.b_1dim, self.b_2dim]:
        for dtype in self.dtypes:
            a = np.asfortranarray(self.a_singular, dtype=dtype)
            b = np.asfortranarray(b, dtype=dtype)
            r = _numba_linalg_solve(a, b)
            ok_(r != 0)
Developer: fo-github | Project: quant-econ | Lines of code: 7 | Source file: test_numba.py
Note: the numpy.asfortranarray examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; please consult each project's license before redistributing or reusing the code. Do not repost without permission.