This article collects and summarizes typical usage examples of the Python function scikits.cuda.linalg.dot. If you have been wondering what dot does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following presents 20 code examples of the dot function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
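Before the examples, here is a minimal, self-contained sketch of the setup these snippets assume: a CUDA-capable GPU with PyCUDA and scikits.cuda installed, and linalg.init() called before any linalg routine. The shapes are illustrative, not taken from any example below.

import numpy as np
import pycuda.autoinit  # creates the CUDA context
import pycuda.gpuarray as gpuarray
import scikits.cuda.linalg as linalg

linalg.init()  # must be called once before using linalg functions

a = np.asarray(np.random.rand(4, 2), np.float32)
b = np.asarray(np.random.rand(2, 2), np.float32)
a_gpu = gpuarray.to_gpu(a)        # copy host arrays to the GPU
b_gpu = gpuarray.to_gpu(b)
c_gpu = linalg.dot(a_gpu, b_gpu)  # matrix product computed via CUBLAS
assert np.allclose(np.dot(a, b), c_gpu.get())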
Example 1: multinomial_log_likelihood
def multinomial_log_likelihood(softmax_vals, Y, one_n_trans, one_c):
    # Add a small epsilon to protect against log(0)
    small_val = 1e-9
    prod = Y * cumath.log(softmax_vals + small_val)
    prod = linalg.dot(one_n_trans, prod)
    prod = linalg.dot(prod, one_c)
    return prod.get()
Developer: beamandrew, Project: HMC_GPU, Lines: 7, Source: mnist_fit.py
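The two dot calls with the ones vectors reduce the elementwise log-likelihood matrix to its scalar sum: left-multiplying by a (1, n) row of ones sums over rows, right-multiplying by a (c, 1) column of ones sums over columns. A NumPy sketch of the identity (the shapes are assumptions, not taken from the source):

import numpy as np

n, c = 4, 3
prod = np.random.rand(n, c)
one_n_trans = np.ones((1, n))  # row vector of ones
one_c = np.ones((c, 1))        # column vector of ones
total = one_n_trans.dot(prod).dot(one_c)  # mirrors the two linalg.dot calls
assert np.allclose(total, prod.sum())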
Example 2: calculate_H_gpu
def calculate_H_gpu(X, W, P):
    # Form W^T W and add the vector P to its diagonal
    WPW = la.add_diag(P, la.dot(W, W, "t", "n"))
    tmp = la.dot(W, la.inv(WPW, overwrite=True))
    H = la.dot(X, tmp, "n", "t")
    H = gpu.maximum(H, 0)  # clamp negative entries to zero
    H = to_unit_variance(H)
    return H, tmp
Developer: Tamme, Project: mutationalsignaturesNCSUT, Lines: 7, Source: cpu_basic_python_implementation.py
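For reference, the transpose flags make la.dot(W, W, "t", "n") compute WᵀW, and la.add_diag(P, ·) adds the vector P to the diagonal of its matrix argument. A NumPy sketch of the same call sequence; the shapes below are assumptions chosen only to make the products conform:

import numpy as np

n, m, k = 8, 6, 3          # assumed shapes: X is (n, k), W is (m, k)
X = np.random.rand(n, k)
W = np.random.rand(m, k)
P = np.random.rand(k)
WPW = np.diag(P) + W.T.dot(W)     # la.add_diag(P, la.dot(W, W, "t", "n"))
tmp = W.dot(np.linalg.inv(WPW))   # la.dot(W, la.inv(WPW, overwrite=True))
H = np.maximum(X.dot(tmp.T), 0)   # la.dot(X, tmp, "n", "t"); gpu.maximum(H, 0)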
Example 3: backward
def backward(self, top, propagate_down, bottom):
    with pu.caffe_cuda_context():
        h = caffe.cublas_handle()
        import scikits.cuda.linalg as linalg
        top_diff = top[0].diff_as_pycuda_gpuarray()
        ts = [self.t1_, self.t2_]
        for i in xrange(len(bottom)):
            if not propagate_down[i]:
                continue
            diff = bottom[i].diff_as_pycuda_gpuarray()
            data = bottom[(i + 1) % 2].data_as_pycuda_gpuarray()
            # The three conditions below are subtle: they pick the operand
            # order and transpose flags for the gradient of each input.
            swap = ts[i] ^ bool(i)
            t1 = ts[i]
            t2 = (not t1) ^ ts[(i + 1) % 2]
            for b in xrange(bottom[0].shape[0]):
                x = top_diff[b]
                y = data[b]
                t1_, t2_ = t1, t2
                if swap:
                    x, y = y, x
                    t1_, t2_ = t2_, t1_
                linalg.dot(x, y,
                           transa=blas_trans(t1_), transb=blas_trans(t2_),
                           handle=h, out=diff[b])
Developer: NHZlX, Project: tnarihi-caffe-helper, Lines: 26, Source: common_layers.py
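The XOR/swap logic encodes the standard matrix-product gradients. For C = A·B the chain rule gives dA = dC·Bᵀ and dB = Aᵀ·dC; the flags merely track how those transposes flip when the forward pass itself transposed an operand. A NumPy check of the plain (no-transpose) case:

import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 2)
dC = np.random.rand(3, 2)   # upstream gradient w.r.t. C = A.dot(B)
dA = dC.dot(B.T)            # gradient w.r.t. the first operand
dB = A.T.dot(dC)            # gradient w.r.t. the second operand
assert dA.shape == A.shape and dB.shape == B.shape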
Example 4: backprop
def backprop(self, input_data, df_output, cache=None):
    """ Backpropagate through the hidden layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    df_output : ``GPUArray``
        Gradients with respect to the activations of this layer
        (received from the layer above).

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """
    # Get cache if it wasn't provided
    if cache is None:
        cache = self.feed_forward(input_data, prediction=False)
    if len(cache) == 2:
        activations, dropout_mask = cache
    else:
        activations = cache[0]
    # Multiply the binary mask with the incoming gradients
    if self.dropout and dropout_mask is not None:
        apply_dropout_mask(df_output, dropout_mask)
    # Get gradient wrt activation function
    df_activations = self.df(activations)
    delta = df_activations * df_output
    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt inputs
    df_input = linalg.dot(delta, self.W, transb='T')
    # L1 weight decay
    if self.l1_penalty_weight:
        df_W -= self.l1_penalty_weight * sign(self.W)
    # L2 weight decay
    if self.l2_penalty_weight:
        df_W -= self.l2_penalty_weight * self.W
    return (df_W, df_b), df_input
Developer: DamonAnderson, Project: hebel, Lines: 60, Source: hidden_layer.py
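The two linalg.dot calls implement the usual dense-layer gradients: with delta of shape (batch, n_out), the weight gradient is Xᵀ·delta and the input gradient is delta·Wᵀ. A NumPy sketch of the same algebra (shapes are assumptions):

import numpy as np

batch, n_in, n_out = 8, 5, 3
X = np.random.rand(batch, n_in)
W = np.random.rand(n_in, n_out)
delta = np.random.rand(batch, n_out)  # df_activations * df_output
df_W = X.T.dot(delta)                 # linalg.dot(input_data, delta, transa='T')
df_b = delta.sum(axis=0)              # matrix_sum_out_axis(delta, 0)
df_input = delta.dot(W.T)             # linalg.dot(delta, self.W, transb='T')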
Example 5: eps_r
def eps_r(x, A1, A2, out, handle):
    out.fill(0)
    #tmp = garr.empty((A1[0].shape[0], x.shape[1]), dtype=A1[0].dtype)
    #tmp2 = garr.empty((tmp.shape[0], A2[0].shape[0]), dtype=A1[0].dtype)
    for s in range(len(A1)):
        tmp = cla.dot(A1[s], x, handle=handle)
        tmp2 = cla.dot(tmp, A2[s], transb='C', handle=handle)  # 'C': conjugate transpose
        out += tmp2
    return out
Developer: amilsted, Project: evoMPS, Lines: 10, Source: cuda_alternatives.py
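transb='C' requests the conjugate transpose of the second operand, so each term accumulates A1[s]·x·A2[s]ᴴ. A NumPy sketch of the same contraction (square, conformable shapes assumed for brevity):

import numpy as np

def eps_r_np(x, A1, A2):
    # out = sum over s of A1[s] . x . A2[s]^H, mirroring the GPU loop above
    out = np.zeros_like(x)
    for s in range(len(A1)):
        out += A1[s].dot(x).dot(A2[s].conj().T)
    return out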
Example 6: forward
def forward(self, bottom, top):
    with pu.caffe_cuda_context():
        h = caffe.cublas_handle()
        import scikits.cuda.linalg as linalg
        mat1 = bottom[0].data_as_pycuda_gpuarray()
        mat2 = bottom[1].data_as_pycuda_gpuarray()
        mato = top[0].data_as_pycuda_gpuarray()
        # Batched matrix product: one GEMM per item in the batch
        for b in xrange(bottom[0].shape[0]):
            linalg.dot(mat1[b], mat2[b],
                       transa=blas_trans(self.t1_),
                       transb=blas_trans(self.t2_),
                       handle=h, out=mato[b])
Developer: NHZlX, Project: tnarihi-caffe-helper, Lines: 12, Source: common_layers.py
Example 7: decompose
def decompose(self):
    gcov = cla.dot(self._Y_gpu, self._Y_gpu, transa='C')  # Gram matrix Y^H Y
    ge_g, gh_g = np.linalg.eigh(gcov.get())
    I = np.argsort(ge_g)[::-1]  # sort eigenpairs in descending order
    ge_g, gh_g = np.sqrt(ge_g[I]), gh_g[:, I]
    # push the matrix back out
    gpueigs = gpuarray.to_gpu(gh_g)
    W_g = cla.dot(self._Y_gpu, gpueigs)
    # Unitize W_g - could be done on gpu to allow async returning
    W_g = W_g.get()
    W_g = W_g / np.sqrt(np.sum(W_g**2, axis=0))[np.newaxis, :]
    return W_g, ge_g, gh_g.T  # Not sure whether the last one should be transposed
Developer: gcasey, Project: pyds, Lines: 13, Source: cudaframebuffer.py
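This is the classic Gram-matrix route to an SVD-like basis: eigendecompose the small matrix YᴴY on the CPU, then map the eigenvectors back through Y on the GPU, which pays off when Y has many more rows than columns. A NumPy sketch of the same steps (Y's shape is an assumption):

import numpy as np

Y = np.random.rand(100, 8)            # tall matrix: rows >> columns
gcov = Y.conj().T.dot(Y)              # cla.dot(Y, Y, transa='C')
ge, gh = np.linalg.eigh(gcov)
I = np.argsort(ge)[::-1]              # descending eigenvalue order
ge, gh = np.sqrt(ge[I]), gh[:, I]     # singular values, right vectors
W = Y.dot(gh)                         # cla.dot(self._Y_gpu, gpueigs)
W = W / np.sqrt((W ** 2).sum(axis=0)) # unitize the columns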
Example 8: test_dot_matrix_h_complex128
def test_dot_matrix_h_complex128(self):
    a = np.asarray(np.random.rand(2, 4) + 1j * np.random.rand(2, 4), np.complex128)
    b = np.asarray(np.random.rand(2, 2) + 1j * np.random.rand(2, 2), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, "c")  # "c": conjugate-transpose the first operand
    assert np.allclose(np.dot(a.conj().T, b), c_gpu.get())
    # Repeat with Fortran-ordered (column-major) arrays
    a = a.astype(np.complex128, order="F", copy=True)
    b = b.astype(np.complex128, order="F", copy=True)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, "c")
    assert np.allclose(np.dot(a.conj().T, b), c_gpu.get())
Developer: zhonghai2810, Project: scikits.cuda, Lines: 13, Source: test_linalg.py
Example 9: test_dot_vector_complex128
def test_dot_vector_complex128(self):
    a = np.asarray(np.random.rand(5), np.complex128)
    b = np.asarray(np.random.rand(5), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)  # 1-D operands: returns the inner product as a host scalar
    assert np.allclose(np.dot(a, b), c)
    # Repeat with Fortran-ordered arrays
    a = a.astype(np.complex128, order="F", copy=True)
    b = b.astype(np.complex128, order="F", copy=True)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c)
Developer: zhonghai2810, Project: scikits.cuda, Lines: 13, Source: test_linalg.py
Example 10: backprop
def backprop(self, input_data, targets, cache=None):
    """ Backpropagate through the logistic layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    targets : ``GPUArray``
        The target values of the units.

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """
    if cache is not None:
        activations = cache
    else:
        activations = self.feed_forward(input_data, prediction=False)
    delta = activations - targets
    nan_to_zeros(delta, delta)
    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt input
    df_input = linalg.dot(delta, self.W, transb='T')
    # L1 penalty
    if self.l1_penalty_weight:
        df_W -= self.l1_penalty_weight * sign(self.W)
    # L2 penalty
    if self.l2_penalty_weight:
        df_W -= self.l2_penalty_weight * self.W
    return (df_W, df_b), df_input
Developer: DamonAnderson, Project: hebel, Lines: 51, Source: logistic_layer.py
Example 11: backprop
def backprop(self, input_data, df_output, cache=None):
    """ Backpropagate through the hidden layer.

    Inputs:
        input_data
        df_output: the gradient wrt the output units
        cache (optional): cache object from the forward pass

    Outputs:
        df_W: gradient wrt the weights
        df_b: gradient wrt the bias
        df_input: gradient wrt the input
    """
    # Get cache if it wasn't provided
    if cache is None:
        cache = self.feed_forward(input_data, prediction=False)
    if len(cache) == 2:
        activations, dropout_mask = cache
    else:
        activations = cache[0]
    # Multiply the binary mask with the incoming gradients
    if self.dropout and dropout_mask is not None:
        apply_dropout_mask(df_output, dropout_mask)
    # Get gradient wrt activation function
    df_activations = self.df(activations)
    delta = df_activations * df_output
    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt inputs
    df_input = linalg.dot(delta, self.W, transb='T')
    # L1 weight decay
    if self.l1_penalty_weight:
        df_W -= self.l1_penalty_weight * sign(self.W)
    # L2 weight decay
    if self.l2_penalty_weight:
        df_W -= self.l2_penalty_weight * self.W
    return (df_W, df_b), df_input
Developer: dreamfrog, Project: hebel, Lines: 49, Source: hidden_layer.py
Example 12: feed_forward
def feed_forward(self, input_data, prediction=False):
    """Propagate forward through the layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    prediction : bool, optional
        Whether to use prediction mode. Only relevant when using
        dropout. If true, the activations are halved if the layer
        uses dropout.

    **Returns:**

    activations : ``GPUArray``
        The activations of the hidden units.
    """
    activations = linalg.dot(input_data, self.W)
    activations = add_vec_to_mat(activations, self.b, inplace=True)
    self.f(activations)
    if self.dropout and prediction:
        activations *= .5
    if self.dropout and not prediction:
        dropout_mask = sample_dropout_mask(activations)
        return activations, dropout_mask
    return (activations,)
Developer: DamonAnderson, Project: hebel, Lines: 32, Source: hidden_layer.py
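The forward pass is a single GEMM plus a broadcast bias add and an in-place nonlinearity. Halving at prediction time is the standard dropout rescaling: with drop probability 0.5 and no rescaling during training, test-time activations must be halved to match the expected train-time activity. A NumPy sketch, using ReLU as a stand-in for self.f (shapes are assumptions):

import numpy as np

batch, n_in, n_out = 8, 5, 3
X = np.random.rand(batch, n_in)
W = np.random.rand(n_in, n_out)
b = np.random.rand(n_out)
act = X.dot(W) + b        # linalg.dot(input_data, self.W), then add_vec_to_mat
act = np.maximum(act, 0)  # self.f(activations), assuming a ReLU layer
act *= .5                 # prediction-time rescaling when dropout is enabled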
Example 13: thunk
def thunk():
    x = inputs[0]
    y = inputs[1]
    # chop off the real/imag dimension
    input_shape_x = x[0].shape  # (a, b, 2)
    input_shape_y = y[0].shape  # (b, c, 2)
    output_shape = (input_shape_x[0], input_shape_y[1], 2)  # (a, c, 2)
    input_x_pycuda = to_complex_gpuarray(x[0])
    input_y_pycuda = to_complex_gpuarray(y[0])
    # multistream experiment: round-robin over a pool of CUDA streams
    # print "DEBUG: Setting stream to %d" % current_stream[0]
    # prev_stream_obj = stream_pool[(current_stream[0] - 1) % num_streams]
    # print "PREV STREAM IS DONE?"
    # print prev_stream_obj.is_done()
    # print
    stream_obj = stream_pool[current_stream[0]]
    cublas.cublasSetStream(handle[0], stream_obj.handle)
    current_stream[0] += 1
    current_stream[0] %= num_streams
    # print "DEBUG: set next stream id to %d" % current_stream[0]
    output_pycuda = linalg.dot(input_x_pycuda, input_y_pycuda, handle=handle[0])
    outputs[0][0] = to_complex_cudandarray(output_pycuda)
Developer: HarveyLiuFly, Project: theano_fftconv, Lines: 30, Source: cufftop.py
Example 14: test_dot_matrix_h_complex128
def test_dot_matrix_h_complex128(self):
    a = np.asarray(np.random.rand(2, 4)+1j*np.random.rand(2, 4), np.complex128)
    b = np.asarray(np.random.rand(2, 2)+1j*np.random.rand(2, 2), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, 'c')
    assert np.allclose(np.dot(a.conj().T, b), c_gpu.get())
Developer: Lurkman, Project: scikits.cuda, Lines: 7, Source: test_linalg.py
Example 15: test_dot_vector_float64
def test_dot_vector_float64(self):
    a = np.asarray(np.random.rand(5), np.float64)
    b = np.asarray(np.random.rand(5), np.float64)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c)
Developer: Lurkman, Project: scikits.cuda, Lines: 7, Source: test_linalg.py
Example 16: test_dot_vector_complex128
def test_dot_vector_complex128(self):
    a = np.asarray(np.random.rand(5), np.complex128)
    b = np.asarray(np.random.rand(5), np.complex128)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c)
Developer: Lurkman, Project: scikits.cuda, Lines: 7, Source: test_linalg.py
Example 17: test_dot_matrix_float32
def test_dot_matrix_float32(self):
    a = np.asarray(np.random.rand(4, 2), np.float32)
    b = np.asarray(np.random.rand(2, 2), np.float32)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu)
    assert np.allclose(np.dot(a, b), c_gpu.get())
Developer: Lurkman, Project: scikits.cuda, Lines: 7, Source: test_linalg.py
Example 18: feed_forward
def feed_forward(self, input_data, prediction=False):
    """ Propagate forward through the hidden layer.

    Inputs:
        input_data -- input from the previous layer
        prediction -- (bool) whether predicting or training

    Outputs:
        activations

    If self.dropout == True and prediction == False:
    Outputs:
        activations
        dropout_mask: binary mask of dropped units
    """
    activations = linalg.dot(input_data, self.W)
    activations = add_vec_to_mat(activations, self.b, inplace=True)
    self.f(activations)
    if self.dropout and prediction:
        activations *= .5
    if self.dropout and not prediction:
        dropout_mask = sample_dropout_mask(activations)
        return activations, dropout_mask
    return (activations,)
Developer: dreamfrog, Project: hebel, Lines: 31, Source: hidden_layer.py
Example 19: test_dot_matrix_t_complex64
def test_dot_matrix_t_complex64(self):
    a = np.asarray(np.random.rand(2, 4), np.complex64)
    b = np.asarray(np.random.rand(2, 2), np.complex64)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = linalg.dot(a_gpu, b_gpu, 't')  # 't': transpose the first operand
    assert np.allclose(np.dot(a.T, b), c_gpu.get())
Developer: Lurkman, Project: scikits.cuda, Lines: 7, Source: test_linalg.py
Example 20: calc_x_G
def calc_x_G(Kp1, C, Cm1, rp1, lm2, Am1, A, Ap1, lm1_s, lm1_si, r_s, r_si, Vsh, handle=None):
    D = A[0].shape[1]
    Dm1 = A[0].shape[0]
    q = len(A)
    x = garr.zeros((Dm1, q * D - Dm1), dtype=A[0].dtype)
    x_part = garr.empty_like(x)
    x_subpart = garr.empty_like(A[0])
    if not (C is None and Kp1 is None):
        assert (C is not None) and (Kp1 is not None)
        x_part.fill(0)
        for s in range(q):
            x_subpart = eps_r(rp1, C[s], Ap1, x_subpart, handle)  #~1st line
            x_subpart += cla.dot(A[s], Kp1, handle=handle)  #~3rd line
            x_part += cla.dot(cla.dot(x_subpart, r_si, handle=handle), Vsh[s], handle=handle)
        x += cla.dot(lm1_s, x_part, handle=handle)
    if lm2 is not None:
        x_part.fill(0)
        for s in range(q):  #~2nd line
            x_subpart = eps_l(lm2, Am1, Cm1[s], x_subpart, handle)
            x_part += cla.dot(x_subpart, cla.dot(r_s, Vsh[s], handle=handle), handle=handle)
        x += cla.dot(lm1_si, x_part, handle=handle)
    return x
Developer: amilsted, Project: evoMPS, Lines: 29, Source: cuda_alternatives.py
Note: the scikits.cuda.linalg.dot examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, refer to each project's License; do not reproduce without permission.