本文整理汇总了Python中theano.sandbox.cuda.gpu_from_host函数的典型用法代码示例。如果您正苦于以下问题:Python gpu_from_host函数的具体用法?Python gpu_from_host怎么用?Python gpu_from_host使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了gpu_from_host函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_reject_rect
def test_reject_rect():
    """FilterActs and ImageActs must raise ValueError for non-square kernels."""
    for acts_class in (FilterActs, ImageActs):
        # A rectangular (filter_rows x filter_rows+1) kernel should be
        # rejected at run time.
        rng = np.random.RandomState([2012, 10, 9])
        batch_size = 5
        rows = 10
        cols = 9
        channels = 3
        filter_rows = 4
        filter_cols = filter_rows + 1
        num_filters = 6
        images = shared(
            rng.uniform(-1., 1.,
                        (channels, rows, cols, batch_size)).astype('float32'),
            name='images')
        filters = shared(
            rng.uniform(-1., 1.,
                        (channels, filter_rows, filter_cols,
                         num_filters)).astype('float32'),
            name='filters')
        gpu_images = gpu_from_host(images)
        gpu_filters = gpu_from_host(filters)
        # ImageActs additionally needs the output spatial size.
        if acts_class is ImageActs:
            output = acts_class()(gpu_images, gpu_filters,
                                  as_tensor_variable((rows, cols)))
        else:
            output = acts_class()(gpu_images, gpu_filters)
        f = function([], output)
        try:
            f()
        except ValueError:
            continue
        # No ValueError was raised: the rectangular kernel was accepted.
        assert False
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:33,代码来源:test_common.py
示例2: local_to_gpu
def local_to_gpu(node):
    """
    op(host_from_gpu()) -> host_from_gpu(op)
    gpu_from_host(op) -> op(gpu_from_host)
    """
    def comes_from_gpu(var):
        # True when ``var`` is produced by a GPU->host transfer.
        return var.owner and isinstance(var.owner.op, cuda.HostFromGpu)

    if isinstance(node.op, op):
        # op(host_from_gpu()) -> host_from_gpu(op)
        # If any of the GPU-bound inputs already lives on the GPU,
        # lift the whole op onto the GPU.
        if any(comes_from_gpu(node.inputs[idx]) for idx in to_gpu):
            lifted = list(node.inputs)
            for idx in to_gpu:
                lifted[idx] = cuda.gpu_from_host(lifted[idx])
            return [cuda.host_from_gpu(op()(*lifted))]
    if node.op == cuda.gpu_from_host:
        # gpu_from_host(op) -> op(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op, op):
            inner_node = host_input.owner
            lifted = list(inner_node.inputs)
            for idx in to_gpu:
                lifted[idx] = cuda.gpu_from_host(lifted[idx])
            return [op()(*lifted)]
    return False
开发者ID:sordonia,项目名称:Theano,代码行数:27,代码来源:conv3d2d.py
示例3: local_to_gpu
def local_to_gpu(node):
    """
    op(host_from_gpu()) -> host_from_gpu(op)
    gpu_from_host(op) -> op(gpu_from_host)
    """
    def fed_from_gpu(var):
        # True when ``var`` is the output of a GPU->host transfer.
        return var.owner and isinstance(var.owner.op, cuda.HostFromGpu)

    if isinstance(node.op, op):
        # op(host_from_gpu()) -> host_from_gpu(op): if any GPU-bound input
        # already comes from the GPU, lift the whole op onto the GPU.
        if any(fed_from_gpu(node.inputs[idx]) for idx in to_gpu):
            lifted_inputs = list(node.inputs)
            for idx in to_gpu:
                lifted_inputs[idx] = cuda.gpu_from_host(lifted_inputs[idx])
            gpu_result = op()(*lifted_inputs)
            copy_stack_trace(node.outputs[0], gpu_result)
            # Transfer back to the CPU so the replaced output keeps its type.
            host_result = gpu_result.transfer('cpu')
            copy_stack_trace(node.outputs[0], host_result)
            return [host_result]
    if node.op == cuda.gpu_from_host:
        # gpu_from_host(op) -> op(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op, op):
            inner_node = host_input.owner
            lifted_inputs = list(inner_node.inputs)
            for idx in to_gpu:
                lifted_inputs[idx] = cuda.gpu_from_host(lifted_inputs[idx])
            replacement = op()(*lifted_inputs)
            copy_stack_trace(host_input, replacement)
            return [replacement]
    return False
开发者ID:julianser,项目名称:Theano,代码行数:34,代码来源:conv3d2d.py
示例4: test_gpu_opt
def test_gpu_opt():
    """The multinomial op must be moved to the GPU when its output is."""
    if not cuda.cuda_available:
        # Skip test if cuda_ndarray is not available.
        from nose.plugins.skip import SkipTest
        raise SkipTest('Optional package cuda not available')

    def compile_and_check(prob_var, uniform_var):
        # Build multinomial(prob, uniform), request the result on the GPU,
        # and verify the optimizer substituted the GPU implementation.
        sample = multinomial.MultinomialFromUniform('auto')(prob_var,
                                                            uniform_var)
        assert sample.dtype == 'float32', sample.dtype
        f = function([prob_var, uniform_var], cuda.gpu_from_host(sample),
                     allow_input_downcast=True, mode=get_mode(True))
        assert any(type(n.op) is multinomial.GpuMultinomialFromUniform
                   for n in f.maker.fgraph.toposort())
        return f

    u = tensor.fvector()
    # Case 1: probabilities in a general float32 matrix.
    f = compile_and_check(tensor.fmatrix(), u)
    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    f(pval, uval)
    # Case 2: a row variable -- this used to fail in the past.
    f = compile_and_check(tensor.frow(), u)
    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    f(pval, uval)
开发者ID:Jackwangyang,项目名称:Theano,代码行数:35,代码来源:test_multinomial.py
示例5: test_alloc_memset_0
def test_alloc_memset_0():
    """GpuAlloc must take the memset(0) fast path exactly when filling with 0.

    For each fill constant we compile ``gpu_alloc(constant, i)`` and check:

    * the optimized graph is a single GpuAlloc node,
    * its ``memset_0`` flag is set iff the fill value is all zeros,
    * the computed values equal the fill value.

    The three original copy-pasted test sections are folded into one
    parameterized helper.
    """
    i = tensor.iscalar()

    def _check(fill, n, expect_memset, expected):
        # Compile one gpu_alloc graph and verify the flag and the result.
        a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(fill)), i)
        f = theano.function([i], a, mode=mode_with_gpu)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, basic_ops.GpuAlloc)
        assert bool(topo[0].op.memset_0) == expect_memset
        assert (numpy.asarray(f(n)) == expected).all()

    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')
    _check(z, 6, True, 0)       # zeros -> memset fast path
    _check(o, 6, False, 1)      # ones -> generic fill, no memset
    _check(ones, 2, False, 1)   # length-2 ones -> generic fill, no memset
开发者ID:Abioy,项目名称:Theano,代码行数:31,代码来源:test_opt.py
示例6: test_reject_bad_filt_number
def test_reject_bad_filt_number():
    """FilterActs/ImageActs must raise ValueError when the number of
    filters per group is not a multiple of 16."""
    for acts_class in (FilterActs, ImageActs):
        rng = np.random.RandomState([2012, 10, 9])
        batch_size = 5
        rows = 10
        cols = 9
        channels = 3
        filter_rows = 4
        filter_cols = filter_rows
        num_filters = 6  # deliberately not 16
        images = shared(
            rng.uniform(-1., 1.,
                        (channels, rows, cols, batch_size)).astype('float32'),
            name='images')
        filters = shared(
            rng.uniform(-1., 1.,
                        (channels, filter_rows, filter_cols,
                         num_filters)).astype('float32'),
            name='filters')
        gpu_images = gpu_from_host(images)
        gpu_filters = gpu_from_host(filters)
        output = acts_class()(gpu_images, gpu_filters)
        f = function([], output)
        try:
            f()
        except ValueError:
            continue
        # No ValueError was raised: the bad filter count was accepted.
        assert False
开发者ID:casperkaae,项目名称:pylearn2,代码行数:28,代码来源:test_common.py
示例7: test_grad
def test_grad():
    """The gradient of FilterActs wrt the images must match the gradient
    of the equivalent theano conv2d graph, up to a loose tolerance."""
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16
    # Inputs stored as (channels, rows, cols, batch) / (channels, frows,
    # fcols, nfilters) -- the layout the cuda-convnet wrapper takes.
    images = shared(rng.uniform(-1.0, 1.0, (channels, rows, cols, batch_size)).astype("float32"), name="images")
    filters = shared(
        rng.uniform(-1.0, 1.0, (channels, filter_rows, filter_cols, num_filters)).astype("float32"), name="filters"
    )
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # XXX: use verify_grad
    output_grad = grad(output.sum(), images)
    # Reference path: conv2d takes (batch, channels, rows, cols); the
    # kernels are flipped -- presumably because FilterActs computes a
    # correlation rather than a true convolution (TODO confirm).
    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode="valid")
    output_conv2d = output_conv2d.dimshuffle(1, 2, 3, 0)
    # XXX: use verify_grad
    output_conv2d_grad = grad(output_conv2d.sum(), images)
    f = function([], [output_grad, output_conv2d_grad])
    output_grad, output_conv2d_grad = f()
    warnings.warn(
        """test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
One possibility is that theano is numerically unstable and Alex's code is better.
Probably theano CPU 64 bit is OK but it's worth checking the others."""
    )
    # Mismatch beyond the empirical tolerance: dump diagnostics, then fail.
    if np.abs(output_grad - output_conv2d_grad).max() > 7.7e-6:
        assert type(output_grad) == type(output_conv2d_grad)
        assert output_grad.dtype == output_conv2d_grad.dtype
        if output_grad.shape != output_conv2d_grad.shape:
            print "cuda-convnet shape: ", output_grad.shape
            print "theano shape: ", output_conv2d_grad.shape
            assert False
        err = np.abs(output_grad - output_conv2d_grad)
        print "absolute error range: ", (err.min(), err.max())
        print "mean absolute error: ", err.mean()
        print "cuda-convnet value range: ", (output_grad.min(), output_grad.max())
        print "theano value range: ", (output_conv2d_grad.min(), output_conv2d_grad.max())
        assert False
示例8: test_match_valid_conv_strided
def test_match_valid_conv_strided():
    # Tests that running FilterActs with stride is the same as running
    # theano's conv2D in valid mode and then downsampling
    rng = np.random.RandomState([2012,10,9])
    batch_size = 5
    rows = 9
    cols = 9
    channels = 3
    filter_rows = 3
    filter_cols = filter_rows
    stride = 3
    num_filters = 16
    # Inputs stored as (channels, rows, cols, batch) / (channels, frows,
    # fcols, nfilters).
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
                    batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                     filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs(stride=stride)(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Reference path: conv2d takes (batch, channels, rows, cols); kernels
    # are flipped -- presumably FilterActs computes a correlation rather
    # than a true convolution (TODO confirm).
    images_bc01 = images.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    # Striding handled by conv2d's subsample; the explicit downsampling
    # slice below is kept commented out.
    output_conv2d = conv2d(images_bc01, filters_bc01,
                           border_mode='valid', subsample=(stride, stride))
    output_conv2d_orig = output_conv2d.dimshuffle(1,2,3,0)
    output_conv2d = output_conv2d_orig # [:, ::stride, ::stride, :]
    f = function([], [output, output_conv2d, output_conv2d_orig])
    output, output_conv2d, output_conv2d_orig = f()
    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
One possibility is that theano is numerically unstable and Alex's code is better.
Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    # Mismatch beyond the empirical tolerance: dump diagnostics, then fail.
    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ',output.shape
            print 'theano shape: ',output_conv2d.shape
            assert False
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:56,代码来源:test_filter_acts.py
示例9: test_match_valid_conv
def test_match_valid_conv():
    # Tests that running FilterActs with no padding is the same as running
    # theano's conv2D in valid mode
    rng = np.random.RandomState([2012,10,9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16
    # Inputs stored as (channels, rows, cols, batch) / (channels, frows,
    # fcols, nfilters).
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
                    batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                     filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Reference path: conv2d takes (batch, channels, rows, cols); kernels
    # are flipped -- presumably FilterActs computes a correlation rather
    # than a true convolution (TODO confirm).
    images_bc01 = images.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01,
                           border_mode='valid')
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    try:
        f = function([], [output, output_conv2d])
    except:
        # NOTE(review): bare except -- any compilation failure at all gets
        # reported as this known failure; consider narrowing it.
        raise KnownFailureTest("cuda-convnet code depends on an unmerged theano feature.")
    output, output_conv2d = f()
    warnings.warn("test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?")
    # Mismatch beyond the empirical tolerance: dump diagnostics, then fail.
    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ',output.shape
            print 'theano shape: ',output_conv2d.shape
            assert False
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
开发者ID:deigen,项目名称:pylearn,代码行数:56,代码来源:test_filter_acts.py
示例10: insert_gpu_filter_acts
def insert_gpu_filter_acts(node):
    """Optimizer: replace a CPU FilterActs node with GpuFilterActs wrapped
    in host<->gpu transfers when any input or client is on the GPU."""
    if not isinstance(node.op, FilterActs):
        return
    images, filters = node.inputs
    if not (any_from_gpu(images, filters) or any_gpu_client(*node.outputs)):
        return
    gpu_op = GpuFilterActs(module_stride=node.op.module_stride,
                           partial_sum=1)
    gpu_output = gpu_op(gpu_from_host(images), gpu_from_host(filters))
    return [host_from_gpu(gpu_output)]
示例11: insert_gpu_weight_acts
def insert_gpu_weight_acts(node):
    """Optimizer: replace a CPU WeightActs node with GpuWeightActs wrapped
    in host<->gpu transfers when any input or client is on the GPU."""
    if not isinstance(node.op, WeightActs):
        return
    images, hidacts, frows, fcols = node.inputs
    if not (any_from_gpu(images, hidacts) or any_gpu_client(*node.outputs)):
        return
    gpu_op = GpuWeightActs(module_stride=node.op.module_stride,
                           partial_sum=1)
    gpu_output = gpu_op(gpu_from_host(images), gpu_from_host(hidacts),
                        frows, fcols)
    return [host_from_gpu(gpu_output)]
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:13,代码来源:gpu_unshared_conv.py
示例12: insert_gpu_img_acts
def insert_gpu_img_acts(node):
    """Optimizer: replace a CPU ImgActs node with GpuImgActs wrapped in
    host<->gpu transfers when any input or client is on the GPU."""
    if not isinstance(node.op, ImgActs):
        return
    filters, hidacts, irows, icols = node.inputs
    if not (any_from_gpu(filters, hidacts) or any_gpu_client(*node.outputs)):
        return
    gpu_op = GpuImgActs(module_stride=node.op.module_stride,
                        partial_sum=1)
    gpu_output = gpu_op(gpu_from_host(filters), gpu_from_host(hidacts),
                        irows, icols)
    return [host_from_gpu(gpu_output)]
开发者ID:Alienfeel,项目名称:pylearn2,代码行数:13,代码来源:gpu_unshared_conv.py
示例13: traverse
def traverse(out, x, x_copy, d, visited=None):
    ''' Function used by scan to parse the tree and figure out which nodes
    it needs to replace. There are two options :
    1) x and x_copy or on host, then you would replace x with x_copy
    2) x is on gpu, x_copy on host, then you need to replace
    host_from_gpu(x) with x_copy
    This happens because initially shared variables are on GPU .. which is
    fine for the main computational graph but confuses things a bit for the
    inner graph of scan '''
    # ``visited`` is a set of nodes that are already known and don't need to be
    # checked again, speeding up the traversal of multiply-connected graphs.
    # if a ``visited`` set is given, it will be updated in-place so the callee
    # knows which nodes we have seen.
    if visited is None:
        visited = set()
    if out in visited:
        return d
    visited.add(out)
    # Imported lazily so the module loads even without the cuda backend.
    import theano.sandbox.cuda as cuda
    if out == x:
        # Case 1: replace x itself; transfer the copy to the GPU so the
        # replacement matches x's (GPU) type.
        d[out] = cuda.gpu_from_host(x_copy)
        return d
    elif out.owner is None:
        # A leaf other than x: nothing to replace here.
        return d
    elif (cuda.cuda_available and
          out.owner.op == cuda.host_from_gpu and
          out.owner.inputs == [x]):
        # Case 2: replace host_from_gpu(x) by the host-side copy.
        d[out] = tensor.as_tensor_variable(x_copy)
        return d
    else:
        # Recurse into this node's inputs, accumulating replacements in d.
        for inp in out.owner.inputs:
            d = traverse(inp, x, x_copy, d, visited)
        return d
开发者ID:Yangqing,项目名称:Theano,代码行数:33,代码来源:scan_utils.py
示例14: lmul
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")
    # TODO Why is it CPU??
    print "Por que?!?!", type(x)
    # Heuristic host/GPU detection based on the variable's type name.
    cpu = "Cuda" not in str(type(x))
    if cpu:
        # Move the host input onto the GPU before calling the GPU-only op.
        x = gpu_from_host(x)
    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5
    # Permute the input to ('c', 0, 1, 't', 'b') ordering -- presumably
    # the layout the FilterActs call below expects (TODO confirm).
    op_axes = ("c", 0, 1, "t", "b")
    if tuple(x_axes) != op_axes:
        print "ssssssssssssssss"
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])
    # Fold the last two axes together so the 5d signal can be fed to the
    # 4d FilterActs op.
    _x_4d_shape = (
        self.signal_shape[0],
        self.signal_shape[1],
        self.signal_shape[2],
        self.signal_shape[3] * self.signal_shape[4],
    )
    x = x.reshape(_x_4d_shape)
    x = gpu_contiguous(x)
    rval = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])(x, self._filters)
    if cpu:
        # Mirror the input transfer: bring the result back to the host.
        rval = host_from_gpu(rval)
    # Un-fold the result into separate filter/time and signal/time axes.
    rval = rval.reshape(
        (
            self.filter_shape[3],
            self.filter_shape[4],
            rval.shape[1],
            rval.shape[2],
            self.signal_shape[3],
            self.signal_shape[4],
        )
    )
    # NOTE(review): the diagonal subtensor over axes (4, 0) followed by the
    # sum looks like it combines the per-time-offset partial results into
    # the temporal convolution output -- confirm against diagonal_subtensor.
    rval = diagonal_subtensor(rval, 4, 0).sum(axis=0)
    # Format the output based on the output space
    rval_axes = self.output_axes
    assert len(rval_axes) == 5
    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])
    return rval
开发者ID:YangXS,项目名称:lisa_emotiw,代码行数:60,代码来源:conv3d_c01tb.py
示例15: lmul
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")
    # Heuristic host/GPU detection based on the variable's type name.
    cpu = 'Cuda' not in str(type(x))
    #assert cpu
    if cpu:
        # Move the host input onto the GPU before the GPU-only correlation.
        x = gpu_from_host(x)
    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5
    #x = shapeprint(x)
    # NOTE(review): op_axes is computed and printed but the dimshuffle that
    # would enforce it is commented out -- the input is passed through in
    # its original axis order.
    op_axes = ('b', 'c', 0, 1, 't')
    print x_axes, op_axes
    #if tuple(x_axes) != op_axes:
    #    x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])
    #x = shapeprint(x)
    #self._filters = shapeprint(self._filters)
    # 3d correlation via the GEMM-based GPU implementation.
    rval = cuda.blas.GpuCorr3dMM(border_mode= 'valid',
                                 subsample = tuple(self.kernel_stride),
                                 pad=tuple(self.pad))(x, self._filters)
    #rval = conv3d(im, filt, None, None, (self.kernel_stride[0], self.kernel_stride[1]) )
    #rval = rval.dimshuffle(0,4,1,2,3)
    #print "hello"
    return rval
开发者ID:AtousaTorabi,项目名称:HumanActivityRecognition,代码行数:33,代码来源:conv3d_btc01new3.py
示例16: lmul
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")
    # Heuristic host/GPU detection based on the variable's type name.
    cpu = 'Cuda' not in str(type(x))
    # NOTE(review): ``assert cpu`` forces the host path, which makes the
    # ``if cpu`` guard below always true -- GPU-typed inputs are rejected.
    assert cpu
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5
    #x = shapeprint(x)
    # Permute the input to ('b', 0, 1, 't', 'c') ordering -- presumably
    # what self.conv3d_op expects (TODO confirm).
    op_axes = ('b', 0, 1, 't', 'c')
    print x_axes, op_axes
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])
    #x = shapeprint(x)
    #self._filters = shapeprint(self._filters)
    rval = self.conv3d_op(x, self._filters, self.b, self.kernel_stride)
    # NOTE(review): the output-axes reordering is commented out, so the
    # result is returned in the op's native axis order.
    #assert len(rval_axes) == 5
    #op_axes = self.output_axes
    #if tuple(rval_axes) != op_axes:
    #    rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])
    return rval
开发者ID:AtousaTorabi,项目名称:HumanActivityRecognition,代码行数:33,代码来源:conv3d_b01tc.py
示例17: local_gpu_fft_conv
def local_gpu_fft_conv(node):
    """
    gpu_conv -> gpu_fft_conv_op
    """
    if not isinstance(node.op, GpuConv):
        return
    no_subsample = node.op.subsample == (1, 1)
    if node.op.border_mode == 'full' and no_subsample:
        # Direct replacement: full-mode conv maps 1:1 onto the FFT op.
        img, kern = node.inputs
        fft_op = GpuFFTConvOp(node.op.border_mode, check=node.op.verbose)
        return [fft_op(gpu_contiguous(img), gpu_contiguous(kern))]
    if (config.GpuFFTConvOp.valid and
            node.op.border_mode == 'valid' and
            no_subsample and
            node.op.kshp and node.op.imshp):
        # Emulate a valid-mode conv: run a full-mode FFT conv, then crop
        # the border back to the valid region.
        kshp = node.op.kshp
        ishp = node.op.imshp[1:]
        top = kshp[0] - 1
        left = kshp[1] - 1
        height = ishp[0] - kshp[0] + 1
        width = ishp[1] - kshp[1] + 1
        img = gpu_contiguous(node.inputs[0])
        kern = gpu_contiguous(node.inputs[1])
        full_conv = GpuFFTConvOp("full", check=node.op.verbose)(img, kern)
        cropped = full_conv[:, :, top:top + height, left:left + width]
        return [cuda.gpu_from_host(cropped)]
开发者ID:lumberlabs,项目名称:fft_conv_op,代码行数:30,代码来源:fft_conv_op.py
示例18: lmul
def lmul(self, x):
"""
dot(x, A)
aka, do convolution with input image x
"""
check_cuda(str(type(self)) + ".lmul")
cpu = 'Cuda' not in str(type(x))
assert cpu
if cpu:
x = gpu_from_host(x)
assert x.ndim == 5
x_axes = self.input_axes
assert len(x_axes) == 5
#x = shapeprint(x)
op_axes = ('b', 0, 1, 't', 'c')
print x_axes, op_axes
if tuple(x_axes) != op_axes:
x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])
#x = shapeprint(x)
#self._filters = shapeprint(self._filters)
im = x.dimshuffle(0,3,4,1,2)
filt = self._filters.dimshuffle(0,3,4,1,2)
rval = conv3d(im, filt, None, None, (self.kernel_stride[0], self.kernel_stride[1]) )
rval = rval.dimshuffle(0,3,4,1,2)
return rval
开发者ID:AtousaTorabi,项目名称:HumanActivityRecognition,代码行数:34,代码来源:conv3d_btc01new.py
示例19: test_gemv1
def test_gemv1(self):
    """vector2 + dot(matrix, vector1): CPU and GPU results must agree and
    both GPU graphs must contain exactly one inplace GPU gemv node."""
    def shared32(*shape):
        # float32 shared variable filled with uniform random values.
        return theano.tensor._shared(
            numpy.array(numpy.random.rand(*shape), dtype='float32'))

    v1 = shared32(2)
    v2 = shared32(5)
    m = shared32(5, 2)
    expr = v2 + theano.dot(m, v1)
    no_gpu_f = theano.function([], expr, mode=mode_without_gpu)
    gpu_f = theano.function([], expr, mode=mode_with_gpu)
    # gpu_f2 covers the case where the input is not on the gpu but the
    # output is explicitly moved to the gpu.
    gpu_f2 = theano.function([], tcn.gpu_from_host(expr),
                             mode=mode_with_gpu)
    # All three functions must compute the same values.
    assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
    assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
    # Both GPU variants must actually use the GPU gemv, exactly once.
    for f in (gpu_f, gpu_f2):
        gemv_count = sum(node.op is gpu_gemv_inplace
                         for node in f.maker.fgraph.toposort())
        assert gemv_count == 1
示例20: test_dot_vm
def test_dot_vm(self):
    """Vector-matrix dot: CPU and GPU must agree, the GPU graphs must use
    the inplace GPU gemv, and results must survive a double-strided m."""
    v = theano.shared(numpy.array(numpy.random.rand(2), dtype='float32'))
    m = theano.shared(numpy.array(numpy.random.rand(2, 5),
                                  dtype='float32'))
    product = theano.dot(v, m)
    no_gpu_f = theano.function([], product, mode=mode_without_gpu)
    gpu_f = theano.function([], product, mode=mode_with_gpu)
    # gpu_f2 covers the case where the input is not on the gpu but the
    # output is explicitly moved to the gpu.
    gpu_f2 = theano.function([], tcn.gpu_from_host(product),
                             mode=mode_with_gpu)

    def assert_all_close():
        assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
        assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)

    assert_all_close()
    # Both GPU variants must actually use the GPU gemv, exactly once.
    for f in (gpu_f, gpu_f2):
        gemv_count = sum(node.op is gpu_gemv_inplace
                         for node in f.maker.fgraph.toposort())
        assert gemv_count == 1
    # Check double-strided m: reverse both axes in-place and re-compare.
    m.set_value(
        m.get_value(borrow=True,
                    return_internal_type=True)[::-1, ::-1],
        borrow=True)
    assert_all_close()
开发者ID:gyenney,项目名称:Tools,代码行数:28,代码来源:test_blas.py
注:本文中的theano.sandbox.cuda.gpu_from_host函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论