Python basic_ops.gpu_from_host function code examples


This article collects typical usage examples of the Python function theano.sandbox.cuda.basic_ops.gpu_from_host. If you have been wondering what gpu_from_host does and how to use it, the curated examples below should help.



Below are 20 code examples of the gpu_from_host function, drawn from open-source projects and ordered by popularity.
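For orientation before the examples: gpu_from_host inserts a host-to-GPU transfer into the symbolic graph, so the computation consuming its output is placed on the GPU; host_from_gpu is its inverse. A minimal sketch, assuming the legacy theano.sandbox.cuda backend with device=gpu and floatX=float32 (the variable names here are illustrative):

import numpy
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_from_host, host_from_gpu

x = T.matrix('x')               # symbolic tensor living on the host (CPU)
y = T.exp(x)                    # computation we want to run on the GPU

# Returning gpu_from_host(y) keeps the result on the GPU as a CudaNdarray,
# avoiding a device-to-host copy; wrapping it in host_from_gpu instead
# would transfer the result back to a numpy ndarray.
f = theano.function([x], gpu_from_host(y))

data = numpy.random.rand(4, 5).astype('float32')
result = numpy.asarray(f(data))  # explicit device -> host copy for inspection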

Example 1: use_gpu_cumsum

def use_gpu_cumsum(node):
    if type(node.op) is CumOp \
       and node.inputs[0].dtype == 'float32' \
       and node.inputs[0].owner \
       and isinstance(node.inputs[0].owner.op, HostFromGpu):

        if node.op.mode != 'add':
            return None

        axis = node.op.axis
        x = node.inputs[0]

        if axis is not None and x.ndim > GpuCumsum.SUPPORTED_NDIMS:
            return None

        x = gpu_from_host(x)

        if axis is None and x.ndim > 1:
            x = gpu_flatten(x)

        # GpuCumsum assumes the array has already been flattened if needed.
        if axis is None:
            axis = 0

        ret = host_from_gpu(GpuCumsum(axis)(x))
        ret.tag.values_eq_approx = values_eq_approx_high_tol
        return [ret]
Author: Faruk-Ahmed | Project: Theano | Lines: 27 | Source: extra_ops.py
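Optimizers like use_gpu_cumsum are not called by user code; they are registered with Theano's graph optimizer, which applies them during function compilation. A sketch of the usual registration pattern, based on old Theano's theano.sandbox.cuda.opt conventions (individual projects may register differently):

from theano.gof import local_optimizer
from theano.sandbox.cuda import cuda_available

if cuda_available:
    from theano.sandbox.cuda.opt import register_opt

    @register_opt()
    @local_optimizer([CumOp])   # only visit nodes whose op is a CumOp
    def use_gpu_cumsum(node):
        # body as in Example 1: return [host_from_gpu(GpuCumsum(axis)(x))],
        # or None to leave the node unchanged
        return None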


Example 2: use_gpu_images2neibs

def use_gpu_images2neibs(node):
    if type(node.op) is Images2Neibs:
        return [
            host_from_gpu(
                gpu_images2neibs(gpu_from_host(node.inputs[0]), node.inputs[1], node.inputs[2], mode=node.op.mode)
            )
        ]
Author: daien | Project: Theano | Lines: 7 | Source: neighbours.py


Example 3: use_gpu_images2neibs

def use_gpu_images2neibs(node):
    if (type(node.op) is Images2Neibs and
        node.inputs[0].dtype == 'float32' and
        node.op.mode in ['valid', 'wrap_centered']):
        return [host_from_gpu(gpu_images2neibs(gpu_from_host(node.inputs[0]),
                                               node.inputs[1], node.inputs[2],
                                               mode=node.op.mode))]
Author: wqren | Project: Theano | Lines: 7 | Source: neighbours.py


Example 4: local_gpu_minres

def local_gpu_minres(node):
    if isinstance(node.op, MinresQLP):
        sw = False
        for inp in node.inputs:
            if inp.owner and inp.owner.op == host_from_gpu:
                sw = True
        if sw:
            inps = node.inputs
            nw_inps = []
            for inp in inps:
                if not isinstance(inp.type, CudaNdarrayType):
                    nw_inps.append(gpu_from_host(inp))
                else:
                    nw_inps.append(inp)
            new_op = node.op
            new_op.gpu = 1
            _new_outs = node.op(*nw_inps)
            new_outs = []
            for out in _new_outs:
                if isinstance(out.type, CudaNdarrayType):
                    new_outs.append(host_from_gpu(out))
                else:
                    new_outs.append(out)
            return new_outs
        else:
            return False
Author: LeeEdel | Project: theano_optimize | Lines: 26 | Source: minresQLP.py


Example 5: __init__

    def __init__(self, **kwargs):
        
        self.num_layers = kwargs.get('num_layers', None)
        self.num_filters = kwargs.get('num_filters', None)
        self.filter_size = kwargs.get('filter_size', None)
        
        self.rng = kwargs.get('rng', np.random.RandomState(42))
        self.load_folder = kwargs.get('weights_folder', None)
        self.activation = kwargs.get('activation', 'relu')
        self.cost_func = kwargs.get('cost_func', 'MSE')  
        
        # Initialize (or load) the weights for the network
        if self.load_folder is None:
            try:
                assert (self.num_layers is not None) and (self.num_filters is not None) and (self.filter_size is not None)
                self.__define_network()
                self.__init_weights()
            except AssertionError:
                print "ERROR: Insufficient parameters for generating new network"
                sys.exit(0)
        else:
            self.__load_weights()

        #Input and Target variables for symbolic representation of network
        self.X = T.tensor4('X')            
        
        #Create the network model
        self.__model()
        
        if(theano.config.device == 'cpu'):
            #Create a predicter based on this network model
            self.forward = theano.function(inputs=[self.X], outputs=self.out, allow_input_downcast=True)
        else:
            #Create a predicter based on this network model
            self.forward = theano.function(inputs=[self.X], outputs=Out(gpu_from_host(self.out), borrow=True), allow_input_downcast=True)
Author: schurterb | Project: convnet | Lines: 35 | Source: cnn.py


Example 6: ctc_cost

def ctc_cost(acts, input_lengths, flat_labels, label_lengths):
  # This should be properly integrated into the theano optimization catalog.
  # Until then, this forces the choice based on device configuration.
  if theano.config.device.startswith("gpu") or theano.sandbox.cuda.cuda_enabled:
    if not isinstance(acts.type, CudaNdarrayType): # if not already on the device
      acts = gpu_from_host(acts)  # this should get optimized away
    return gpu_ctc_cost(acts, input_lengths, flat_labels, label_lengths)
  else:
    return cpu_ctc_cost(acts, input_lengths, flat_labels, label_lengths)
Author: DingKe | Project: theano_ctc | Lines: 9 | Source: __init__.py
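A hedged usage sketch for this dispatcher, assuming theano_ctc's conventions (activations shaped (time, batch, alphabet), int32 length/label vectors, and a CTC op that implements a gradient, as warp-ctc bindings typically do); all names are illustrative:

import theano
import theano.tensor as T

acts = T.tensor3('acts')                    # float32, (time, batch, alphabet)
input_lengths = T.ivector('input_lengths')  # valid timesteps per sequence
flat_labels = T.ivector('flat_labels')      # all label sequences, concatenated
label_lengths = T.ivector('label_lengths')  # length of each label sequence

costs = ctc_cost(acts, input_lengths, flat_labels, label_lengths)
grad = T.grad(costs.sum(), acts)            # backprop through the CTC op

f = theano.function([acts, input_lengths, flat_labels, label_lengths],
                    [costs, grad], allow_input_downcast=True)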


Example 7: local_gpu_argmax

def local_gpu_argmax(node):
    if type(node.op) is KArgmax:
        p, = node.inputs
        vals, indx, = node.outputs
        if (p.dtype == vals.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op, theano.sandbox.cuda.HostFromGpu) for i in node.inputs])):
            gpu_op = GpuKArgmax(node.op.K)
            ret_vals, ret_indx = gpu_op(gpu_from_host(p))
            return [host_from_gpu(ret_vals), T.cast(host_from_gpu(ret_indx), "int32")]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
        node.inputs[0].owner and type(node.inputs[0].owner.op)
        is KArgmax):
        multi = node.inputs[0].owner
        p, = multi.inputs
        vals, indx, = multi.outputs
        if (p.dtype == vals.dtype == 'float32'):
            gpu_op = GpuKArgmax(node.inputs[0].owner.op.K)
            ret_vals, ret_indx = gpu_op(gpu_from_host(p)) 
            return [gpu_from_host(ret_vals), gpu_from_host(ret_indx)]
Author: hydercps | Project: hred-qs | Lines: 19 | Source: theano_extensions.py


Example 8: local_assigner

def local_assigner(node):
    if type(node.op) is Assigner:
        p, indx, gr, = node.inputs
        vals, = node.outputs
        if (p.dtype == vals.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op, theano.sandbox.cuda.HostFromGpu) for i in node.inputs])):
            gpu_op = GpuAssigner()
            ret = gpu_op(gpu_from_host(p),indx,gpu_from_host(gr))
            return [host_from_gpu(ret),]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
        node.inputs[0].owner and type(node.inputs[0].owner.op)
        is Assigner):
        multi = node.inputs[0].owner
        p,indx,gr = multi.inputs
        vals, = multi.outputs
        if (p.dtype == vals.dtype == 'float32'):
            gpu_op = GpuAssigner()
            ret_vals = gpu_op(gpu_from_host(p),indx,gpu_from_host(gr)) 
            return [gpu_from_host(ret_vals)]
Author: hydercps | Project: hred-qs | Lines: 19 | Source: theano_extensions.py


Example 9: local_gpu_multinomial

def local_gpu_multinomial(node):
    if type(node.op) is MultinomialFromUniform:
        p, u = node.inputs
        m, = node.outputs
        if (p.dtype == u.dtype == m.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op, theano.sandbox.cuda.HostFromGpu)
                 for i in node.inputs])):
            gpu_op = GpuMultinomialFromUniform(node.op.odtype)
            return [host_from_gpu(gpu_op(*[gpu_from_host(i) for i in node.inputs])).T]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
        node.inputs[0].owner and type(node.inputs[0].owner.op) is MultinomialFromUniform):
        multi = node.inputs[0].owner
        p, u = multi.inputs
        m, = multi.outputs
        if (p.dtype == u.dtype == m.dtype == 'float32'):
            gpu_op = GpuMultinomialFromUniform(multi.op.odtype)
            ret = gpu_op(*[gpu_from_host(i) for i in multi.inputs]).T
            # The dimshuffle is on the cpu, but will be moved to the gpu by an opt.
            return [gpu_from_host(ret)]
Author: jmarinero | Project: Theano | Lines: 19 | Source: multinomial.py


Example 10: local_gpu_multinomial

def local_gpu_multinomial(node):
    # TODO : need description for function
    if type(node.op) is MultinomialFromUniform:
        if len(node.inputs) == 2:
            p, u = node.inputs
            n_samples = 1
        else:
            p, u, n_samples = node.inputs
        try:
            if get_scalar_constant_value(n_samples) != 1:
                return None
        except NotScalarConstantError:
            return None
        m, = node.outputs
        if (p.dtype == u.dtype == m.dtype == 'float32' and
            any([i.owner and isinstance(i.owner.op,
                                        theano.sandbox.cuda.HostFromGpu)
                 for i in node.inputs])):
            gpu_op = GpuMultinomialFromUniform(node.op.odtype)
            return [host_from_gpu(gpu_op(*[gpu_from_host(i)
                                           for i in [p, u]])).T]
    if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
            node.inputs[0].owner and
            type(node.inputs[0].owner.op) is MultinomialFromUniform):
        multi = node.inputs[0].owner
        if len(node.inputs) == 2:
            p, u = node.inputs
            n_samples = 1
        else:
            p, u, n_samples = node.inputs
        try:
            if get_scalar_constant_value(n_samples) != 1:
                return None
        except NotScalarConstantError:
            return None
        m, = multi.outputs
        if (p.dtype == u.dtype == m.dtype == 'float32'):
            gpu_op = GpuMultinomialFromUniform(multi.op.odtype)
            ret = gpu_op(*[gpu_from_host(i) for i in [p, u]]).T
            # The dimshuffle is on the cpu, but will be moved to the
            # gpu by an opt.
            return [gpu_from_host(ret)]
Author: Faruk-Ahmed | Project: Theano | Lines: 42 | Source: multinomial.py


Example 11: use_gpu_images2neibs

def use_gpu_images2neibs(node):
    if (
        type(node.op) is Images2Neibs
        and node.inputs[0].dtype == "float32"
        and node.op.mode in ["valid", "ignore_borders", "wrap_centered"]
    ):
        return [
            host_from_gpu(
                gpu_images2neibs(gpu_from_host(node.inputs[0]), node.inputs[1], node.inputs[2], mode=node.op.mode)
            )
        ]
Author: Jerryzcn | Project: Theano | Lines: 11 | Source: neighbours.py
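For context, Images2Neibs is the op behind images2neibs, which unrolls a 4D tensor into a 2D matrix with one neighbourhood patch per row; the optimizer above swaps in gpu_images2neibs when the input already lives on the GPU. A host-side sketch (shapes and names are illustrative):

import numpy
import theano
import theano.tensor as T
from theano.sandbox.neighbours import images2neibs

images = T.tensor4('images')                       # (batch, channels, rows, cols)
patches = images2neibs(images, neib_shape=(2, 2))  # one flat 2x2 patch per row

f = theano.function([images], patches)
x = numpy.arange(16, dtype='float32').reshape(1, 1, 4, 4)
# f(x) has shape (4, 4): four non-overlapping 2x2 patches, one per row.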


Example 12: parse_args

    def parse_args(self, bottom, top):
        function_str = self.pythonargs[0]
        top_shape = self.pythonargs[1]

        old_function_str = self.function_str
        old_top_shape = self.top_shape
        self.function_str = function_str
        self.top_shape = top_shape
        if function_str != old_function_str or len(top_shape) != len(old_top_shape):
            if old_function_str != '':
                print('TheanoGPU function string different from cache: recompiling')
            import theano.tensor as T
            import theano
            from theano.sandbox.cuda.basic_ops import gpu_from_host
            x = []
            for i in range(len(bottom)):
                if len(bottom[i].shape) == 1:
                    x.append(T.vector('x%d' % i))
                if len(bottom[i].shape) == 2:
                    x.append(T.matrix('x%d' % i))
                if len(bottom[i].shape) == 3:
                    x.append(T.tensor3('x%d' % i))
                if len(bottom[i].shape) == 4:
                    x.append(T.tensor4('x%d' % i))

            y = eval(function_str)
            self.f = theano.function(x, gpu_from_host(y), on_unused_input='ignore')

            if len(self.top_shape) == 1:
                v = T.vector('v')
            elif len(self.top_shape) == 2:
                v = T.matrix('v')
            elif len(self.top_shape) == 3:
                v = T.tensor3('v')
            elif len(self.top_shape) == 4:
                v = T.tensor4('v')
            self.b = []
            for i in range(len(bottom)):
                yg = T.Lop(y, x[i], v)
                self.b.append(theano.function(x + [v], gpu_from_host(yg), on_unused_input='ignore'))
Author: NoListen | Project: apollocaffe | Lines: 40 | Source: python_layers.py


Example 13: compileModel

def compileModel(data, nInputs, nOutputs, hiddenLayersSize = [1200, 1200], dropoutRates = [0.2, 0.5, 0.5],
                  activation = 'relu', weightInitMode = 'normal', regularizer = 0.0001):
    """
    Creates a symbolic model given the specified parameters using Theano
    
    Output:
    A list containing the compiled Theano training, validation, and test functions
    """
    
    
    np.random.seed(815)
    
    x = T.matrix('x')
    y = T.wvector('y')
    learningRate = T.scalar('learningRate')
    regularization = T.scalar('regularization')
    
    #Data sets
    train_x, train_y = data[0]
    valid_x, valid_y = data[1]
    test_x, test_y = data[2]
    
    nnet = MLP(x, nInputs, hiddenLayersSize, nOutputs, dropoutRates = dropoutRates,
                activation = activation, weightInitMode = weightInitMode)
    
    loss = nnet.loss(y, regularization)
    error = nnet.error(y)
    
    gParams = T.grad(loss, nnet.params)
    
    weightUpdates = [(param, param - learningRate * gParam) for param, gParam in zip(nnet.params, gParams)]    
    
    
    batchIndicesVector = T.ivector('batchIndicesVector')
    trainF = function([batchIndicesVector, learningRate, regularization], Out(sbasic.gpu_from_host(loss), borrow = True), updates = weightUpdates, givens = {x: train_x[batchIndicesVector], y: train_y[batchIndicesVector]})
    validF = function([batchIndicesVector], Out(sbasic.gpu_from_host(T.cast(error, T.config.floatX)), borrow = True), givens = {x: valid_x[batchIndicesVector], y: valid_y[batchIndicesVector]})
    testF = function([batchIndicesVector], Out(sbasic.gpu_from_host(T.cast(error, T.config.floatX)), borrow = True), givens = {x: test_x[batchIndicesVector], y: test_y[batchIndicesVector]})
    
    return [trainF, validF, testF]
Author: Amir-Arsalan | Project: Neural-Networks | Lines: 39 | Source: NeuralNetworks.py


Example 14: parse_args

    def parse_args(self, bottom, top):
        function_str = self.pythonargs[0]
        top_shape = self.pythonargs[1]

        if self.function_str != function_str or self.top_shape != top_shape:
            self.function_str = function_str
            self.top_shape = top_shape

            import theano.tensor as T
            import theano
            from theano.sandbox.cuda.basic_ops import gpu_from_host
            x = []
            for i in range(len(bottom)):
                if len(bottom[i].shape) == 1:
                    x.append(T.vector('x%d' % i))
                if len(bottom[i].shape) == 2:
                    x.append(T.matrix('x%d' % i))
                if len(bottom[i].shape) == 3:
                    x.append(T.tensor3('x%d' % i))
                if len(bottom[i].shape) == 4:
                    x.append(T.tensor4('x%d' % i))

            y = eval(function_str)
            self.f = theano.function(x, gpu_from_host(y), on_unused_input='ignore')

            if len(self.top_shape) == 1:
                v = T.vector('v')
            elif len(self.top_shape) == 2:
                v = T.matrix('v')
            elif len(self.top_shape) == 3:
                v = T.tensor3('v')
            elif len(self.top_shape) == 4:
                v = T.tensor4('v')
            self.b = []
            for i in range(len(bottom)):
                yg = T.Lop(y, x[i], v)
                self.b.append(theano.function(x + [v], gpu_from_host(yg), on_unused_input='ignore'))
Author: ShuaiGitHub | Project: apollocaffe | Lines: 37 | Source: python_layers.py
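In both parse_args variants, T.Lop(y, x[i], v) builds the backward pass: the L-operator is the vector-Jacobian product v^T * (dy/dx[i]), which is exactly what turns the gradient arriving at the top blob into the gradient of the bottom blob. A small sketch of the equivalence (illustrative function and names; it relies on the identity Lop(y, x, v) = grad((y * v).sum(), x) when v is an independent variable):

import theano
import theano.tensor as T

x = T.matrix('x')
y = T.tanh(x)         # stand-in for the user-supplied function_str
v = T.matrix('v')     # upstream gradient, same shape as y

lop = T.Lop(y, x, v)                  # vector-Jacobian product
explicit = T.grad((y * v).sum(), x)   # same quantity via a scalar cost

f = theano.function([x, v], [lop, explicit])  # both outputs agree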


Example 15: grad_step

        def grad_step(*args):

            idx = TT.cast(args[0], 'int32')
            nw_inps = [x[idx * options['cbs']: \
                         (idx + 1) * options['cbs']]
                       for x in loc_inputs]
            replace = dict(zip(model.inputs, nw_inps))
            nw_cost = safe_clone(model.train_cost, replace=replace)
            gs = TT.grad(nw_cost, model.params)
            nw_gs = [op + np for op, np in zip(args[2: 2 + n_params], gs)]
            _gs = [x for x in gs]
            _nw_gs = [gpu_from_host(g) for g in nw_gs]
            nw_gs = ifelse(comp_grad, _nw_gs, _gs, gpu=True)
            nw_gs = [x.type.filter_variable(y) for x,y in zip(args[2:],nw_gs)]
            return [args[0] + const(1), args[1] + nw_cost] + nw_gs
Author: cc13ny | Project: galatea | Lines: 15 | Source: natSGD_ls.py


Example 16: local_gpu_forloop

def local_gpu_forloop(node):
    if isinstance(node.op, forloop):
        sw = False
        for inp in node.inputs:
            if inp.owner and inp.owner.op == host_from_gpu:
                sw = True
        if sw:
            inps = node.inputs
            nw_inps = []
            for inp in inps:
                if not isinstance(inp.type, CudaNdarrayType):
                    nw_inps.append(gpu_from_host(inp))
                else:
                    nw_inps.append(inp)
            new_outs = node.op(*nw_inps)
            return [host_from_gpu(x) for x in new_outs]
        else:
            return False
Author: pascanur | Project: natgrad | Lines: 18 | Source: utils.py


Example 17: test_gpualloc_output_to_gpu

def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4,5),dtype='float32')
    a = tcn.shared_constructor(a_val)

    b = T.fscalar()
    f = theano.function([b], T.ones_like(a)+b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a))+b, mode=mode_with_gpu)

    print f.maker.env.toposort()
    print f_gpu.maker.env.toposort()
    print f(2)
    print f_gpu(2)

    assert sum([node.op == T.alloc for node in f.maker.env.toposort()])==1
    assert sum([node.op == B.gpu_alloc for node in f_gpu.maker.env.toposort()])==1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape)+9,f_gpu(9))
    assert numpy.allclose(f(5),f_gpu(5))
Author: delallea | Project: Theano | Lines: 18 | Source: test_basic_ops.py


Example 18: use_gpu_cumsum

def use_gpu_cumsum(node):
    if type(node.op) is CumsumOp \
       and node.inputs[0].dtype == 'float32' \
       and node.inputs[0].owner \
       and isinstance(node.inputs[0].owner.op, HostFromGpu):

        axis = node.op.axis
        x = node.inputs[0]

        if axis is not None and x.ndim > GpuCumsum.SUPPORTED_NDIMS:
            return None

        x = gpu_from_host(x)

        if axis is None and x.ndim > 1:
            x = GpuFlatten()(x)

        # GpuCumsum assumes the array has already been flattened if needed.
        if axis is None:
            axis = 0

        return [host_from_gpu(GpuCumsum(axis)(x))]
Author: KarnUllrich | Project: Theano | Lines: 22 | Source: extra_ops.py


Example 19: shared

from theano import function, config, shared, sandbox, tensor, Out
import theano
import numpy
import time
from theano.sandbox.cuda.basic_ops import gpu_from_host
vlen = 10 * 30 * 768  # 10 x # cores x # threads per core
iters = 1000
#http://deeplearning.net/software/theano/tutorial/aliasing.html
rng = numpy.random.RandomState(22)

x = shared(numpy.asarray(rng.rand(vlen), theano.config.floatX))

f1 = function([], gpu_from_host(tensor.exp(x)))
f2 = function([],
              Out(gpu_from_host(tensor.exp(x)),
                  borrow=True))
t0 = time.time()
for i in xrange(iters):
    r = f1()
t1 = time.time()
no_borrow = t1 - t0
t0 = time.time()
for i in xrange(iters):
    r = f2()
t1 = time.time()
print 'Looping', iters, 'times took', no_borrow, 'seconds without borrow',
print 'and', t1 - t0, 'seconds with borrow.'
if numpy.any([isinstance(x.op, tensor.Elemwise) and
              ('Gpu' not in type(x.op).__name__)
              for x in f1.maker.fgraph.toposort()]):
    print 'Used the cpu'
Author: phongdk92 | Project: Phonemic-Restoration | Lines: 31 | Source: Bias.py
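The point of the comparison: with Out(..., borrow=True), f2 may hand back its internal GPU buffer instead of copying it, saving one device-side copy per call; the caller must then treat the result as scratch that the next call can overwrite. A sketch of safely materializing such a borrowed result on the host, continuing the script above (numpy.asarray on a CudaNdarray performs the device-to-host copy):

r = f2()
r_host = numpy.asarray(r)  # device -> host copy; safe to keep after this point
r2 = f2()                  # may reuse and overwrite the same GPU buffer as r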


Example 20: train_nn

def train_nn(data_file_name, reg_lambda=0.01, learning_rate=0.01, n_eigs=100, 
        n_neurons_per_layer=100, batch_size=100, display=True):
    train_data, test_data, file_names = old_load_images(data_file_name)
    eig_face = EigenFace.from_file(train_data[0], data_file_name, n_eigs)
    train_data[0] = get_face_space(data_file_name, 'train_x', train_data[0],
                                   eig_face)
    test_data[0] = get_face_space(data_file_name, 'test_x', test_data[0],
                                  eig_face)
    n_features, n_training_examples = train_data[0].shape
    real_scores = test_data[1].T.tolist()

    train_data = to_theano_shared(train_data)
    test_data = to_theano_shared(test_data)

    rng = numpy.random.RandomState(1234)
    x = T.matrix('x')
    y = T.vector('y')

    mlp = MLP(rng, x, n_features, n_neurons_per_layer, n_training_examples)
    cost = mlp.cost(y) + reg_lambda * mlp.L2_sqr

    test_model = theano.function([],
            outputs=[cost, mlp.output],
            givens={x:test_data[0][:], y:test_data[1][:]})

    g_params = []
    for param in mlp.params:
        g_param = T.grad(cost, param)
        g_params.append(g_param)

    updates = {}

    for param, g_param in zip(mlp.params, g_params):
        updates[param] = param - learning_rate * g_param

    train_model = theano.function([],
            outputs=theano.Out(gpu_from_host(cost), borrow=True), updates=updates,
            givens={x:train_data[0][:], y:train_data[1][:]})

    current_cost = numpy.asarray(train_model())
    logging.info('initial cost %f' % current_cost)
    old_cost = 0
    iterations = 0
    logging.info('beginning stochastic gradient descent')
    while abs(current_cost - old_cost) > 0.001:
        old_cost = current_cost
        current_cost = numpy.asarray(train_model())
        if iterations % 10 == 0:
            logging.info('iteration % 9d cost % 9f' % (iterations, current_cost))
        iterations += 1

    error, predictions = test_model()

    # Print the results
    logging.info('training cost minimised: %f' % current_cost)
    logging.info('test error: %f' % error)
    
    predictions = predictions[0].tolist()
    logging.debug('predictions %s', str(predictions))
    pearsons = pearsonr(real_scores, predictions)
    logging.info('pearsons correlation: %f, %f' % pearsons)
    # Save our weights should we ever need them again
    plot_title_data = (n_neurons_per_layer, learning_rate, reg_lambda,
            pearsons[0])
    plot_correlation(real_scores, predictions, file_names, 'neural network with %d neurons' \
            'learning rate %f and reg-lambda %f pearsons %f' % plot_title_data,
            'nn', show=True, pearsons=pearsons)
Author: eldog | Project: fface | Lines: 67 | Source: neural_net_eig_faces.py



Note: the theano.sandbox.cuda.basic_ops.gpu_from_host examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in each snippet remains with its original author, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.

