
Python ndarray.zeros Function Code Examples


This article collects typical usage examples of the Python function mxnet.ndarray.zeros. If you have been wondering exactly what zeros does, how to call it, and what real-world uses look like, the curated examples below should help.



The following presents 20 code examples of the zeros function, sorted by popularity by default.
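Before the examples, here is a minimal sketch of the basic call itself (the shapes, dtype, and context below are illustrative, not taken from any of the projects cited):

import mxnet as mx
from mxnet import nd

# A 2x3 array of zeros on the default context (CPU).
a = nd.zeros((2, 3))

# dtype and device can be set explicitly; float32 and cpu(0) are the defaults.
b = nd.zeros((4,), ctx=mx.cpu(0), dtype='float32')

print(a.shape, b.dtype)  # (2, 3) <class 'numpy.float32'>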

Example 1: run_boston_housing_DistilledSGLD

def run_boston_housing_DistilledSGLD():
    X, Y, X_test, Y_test, X_mean, X_std, Y_mean, Y_std = load_boston_housing()
    print(X.shape, Y.shape, X_test.shape, Y_test.shape)
    minibatch_size = 1
    teacher_noise_precision = 1.25
    teacher_net = get_boston_housing_sym(True, teacher_noise_precision)
    student_net = get_boston_housing_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
    # leftover from the MNIST variant (unused here):
    # 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_grad_f = lambda student_outputs, teacher_pred: \
        regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test,
                      X_mean=X_mean, X_std=X_std, Y_mean=Y_mean, Y_std=Y_std,
                      total_iter_num=5000000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=2E-7, student_learning_rate=1E-2,
                      student_optimizing_algorithm='sgd',
                      teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(80000, 0.5, 1E-7),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(step=5000, factor=0.8,
                                                                           stop_factor_lr=1E-6),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=2.5, student_prior_precision=0.001,
                      perturb_deviation=0.05, minibatch_size=minibatch_size, task='boston',
                      dev=dev())
Author: sxjscience | Project: mxnet | Lines: 34 | Source: bdk_demo.py


Example 2: run_toy_DistilledSGLD

def run_toy_DistilledSGLD(gpu_id=None):
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0
    teacher_net = get_toy_sym(True, teacher_noise_precision)
    student_net = get_toy_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}

    teacher_initializer = mx.init.Uniform(0.07)
    student_initializer = mx.init.Uniform(0.07)
    student_grad_f = lambda student_outputs, teacher_pred: \
        regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=1E-4, student_learning_rate=0.01,
                      # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=0.1, student_prior_precision=0.001,
                      perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
                      dev=dev(gpu_id))
Author: luobao-intel | Project: incubator-mxnet | Lines: 29 | Source: bdk_demo.py


Example 3: _get_or_reshape

        def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
            """Internal helper to get a memory block or re-use by re-shaping"""
            if name in shared_data_arrays:
                arg_arr = shared_data_arrays[name]

                if np.prod(arg_arr.shape) >= np.prod(arg_shape):
                    # nice, we can directly re-use this data blob
                    assert arg_arr.dtype == arg_type
                    arg_arr = arg_arr.reshape(arg_shape)
                else:
                    logger.warning('bucketing: data "%s" has shape %s, which is larger than '
                                   'the already allocated shape %s. Need to re-allocate. '
                                   'Consider setting default_bucket_key to the bucket '
                                   'taking the largest input, for better memory sharing.',
                                   name, arg_shape, arg_arr.shape)
                    arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)

                    # replace existing shared array because the new one is bigger
                    shared_data_arrays[name] = arg_arr
            else:
                arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
                shared_data_arrays[name] = arg_arr

            return arg_arr
Author: ktr-hubrt | Project: Deformable-ConvNets | Lines: 26 | Source: DataParallelExecutorGroup.py


Example 4: main

def main(args):
  ctx = mx.gpu(args.gpu)
  args.ctx_num = 1
  prop = face_image.load_property(args.data)
  image_size = prop.image_size
  print('image_size', image_size)
  vec = args.model.split(',')
  prefix = vec[0]
  epoch = int(vec[1])
  print('loading',prefix, epoch)
  sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
  arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
  all_layers = sym.get_internals()
  sym = all_layers['fc1_output']
  model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
  model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))])
  model.set_params(arg_params, aux_params)
  path_imgrec = os.path.join(args.data, 'train.rec')
  path_imgidx = os.path.join(args.data, 'train.idx')
  imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
  s = imgrec.read_idx(0)
  header, _ = mx.recordio.unpack(s)
  assert header.flag>0
  print('header0 label', header.label)
  header0 = (int(header.label[0]), int(header.label[1]))
  imgidx = range(1, int(header.label[0]))
  stat = []
  data = nd.zeros((1, 3, image_size[0], image_size[1]))
  label = nd.zeros((1,))
  for idx in imgidx:
    if len(stat)%100==0:
      print('processing', len(stat))
    s = imgrec.read_idx(idx)
    header, img = mx.recordio.unpack(s)
    img = mx.image.imdecode(img)
    img = nd.transpose(img, axes=(2, 0, 1))
    data[0][:] = img
    time_now = datetime.datetime.now()
    #db = mx.io.DataBatch(data=(data,), label=(label,))  # labeled variant
    db = mx.io.DataBatch(data=(data,))
    model.forward(db, is_train=False)
    net_out = model.get_outputs()[0].asnumpy()
    time_now2 = datetime.datetime.now()
    diff = time_now2 - time_now
    stat.append(diff.total_seconds())
    if len(stat)==args.param1:
      break
  stat = stat[10:]
  print('avg infer time', np.mean(stat))
Author: LHQ0308 | Project: insightface | Lines: 59 | Source: benchmark.py


Example 5: train

def train(input_variable, target_variable, encoder, decoder, teacher_forcing_ratio,
          encoder_optimizer, decoder_optimizer, criterion, max_length, ctx):
    with autograd.record():
        loss = F.zeros((1,), ctx=ctx)

        encoder_hidden = encoder.initHidden(ctx)

        input_length = input_variable.shape[0]
        target_length = target_variable.shape[0]

        encoder_outputs, encoder_hidden = encoder(
                input_variable.expand_dims(0), encoder_hidden)

        if input_length < max_length:
            encoder_outputs = F.concat(encoder_outputs.flatten(),
                F.zeros((max_length - input_length, encoder.hidden_size), ctx=ctx), dim=0)
        else:
            encoder_outputs = encoder_outputs.flatten()



        decoder_input = F.array([SOS_token], ctx=ctx)

        decoder_hidden = encoder_hidden

        use_teacher_forcing = random.random() < teacher_forcing_ratio

        if use_teacher_forcing:
            # Teacher forcing: Feed the target as the next input
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = decoder(
                    decoder_input, decoder_hidden, encoder_outputs)

                loss = F.add(loss, criterion(decoder_output, target_variable[di]))
                print(criterion(decoder_output, target_variable[di]))
                decoder_input = target_variable[di]  # Teacher forcing

        else:
            # Without teacher forcing: use its own predictions as the next input
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = decoder(
                    decoder_input, decoder_hidden, encoder_outputs)
                topi = decoder_output.argmax(axis=1)

                decoder_input = F.array([topi.asscalar()], ctx=ctx)

                loss = F.add(loss, criterion(decoder_output, target_variable[di]))

                if topi.asscalar() == EOS_token:
                    break

        loss.backward()

    encoder_optimizer.step(1)
    decoder_optimizer.step(1)

    return loss.asscalar()/target_length
Author: ZiyueHuang | Project: MXSeq2Seq | Lines: 57 | Source: seq2seq.py


Example 6: weights_init

def weights_init(layers):
    for layer in layers:
        classname = layer.__class__.__name__
        if hasattr(layer, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            layer.weight.set_data(nd.random.normal(0.0, 0.02, shape=layer.weight.data().shape))
            if hasattr(layer, 'bias') and layer.bias is not None:
                layer.bias.set_data(nd.zeros(layer.bias.data().shape))
        elif classname.find('BatchNorm') != -1:
            layer.gamma.set_data(nd.random.normal(1.0, 0.02, shape=layer.gamma.data().shape))
            # BatchNorm layers have gamma/beta, not weight/bias; zero-init beta from its own shape
            layer.beta.set_data(nd.zeros(layer.beta.data().shape))
Author: xiayongtao | Project: gluon-cv | Lines: 10 | Source: train_cgan.py
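As a usage sketch (the network, shapes, and the dummy forward pass below are illustrative assumptions, not taken from the original project), the parameters must be materialized before set_data can overwrite them:

from mxnet import nd
from mxnet.gluon import nn

net = nn.Sequential()
net.add(nn.Conv2D(8, kernel_size=3), nn.BatchNorm())
net.initialize()                  # register default initializers
net(nd.zeros((1, 3, 32, 32)))     # dummy forward pass materializes deferred shapes
weights_init(net)                 # overwrite with the normal(0, 0.02) scheme above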


Example 7: get_params

def get_params():
    W_xh = nd.random_normal(scale=std, shape=(input_dim, hidden_dim), ctx=ctx)
    W_hh = nd.random_normal(scale=std, shape=(hidden_dim, hidden_dim), ctx=ctx)
    b_h = nd.zeros(hidden_dim, ctx=ctx)

    W_hy = nd.random_normal(scale=std, shape=(hidden_dim, output_dim), ctx=ctx)
    b_y = nd.zeros(output_dim, ctx=ctx)

    params = [W_xh, W_hh, b_h, W_hy, b_y]
    for param in params:
        param.attach_grad()
    return params
Author: z01nl1o02 | Project: tests | Lines: 12 | Source: main.py
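These parameter names imply the conventional vanilla-RNN recurrence; here is a one-step sketch of how they would be consumed (the step function itself is illustrative, not from the original repo):

from mxnet import nd

def rnn_step(X, H, params):
    # H' = tanh(X W_xh + H W_hh + b_h); Y = H' W_hy + b_y
    W_xh, W_hh, b_h, W_hy, b_y = params
    H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)
    Y = nd.dot(H, W_hy) + b_y
    return Y, H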


Example 8: run_toy_HMC

def run_toy_HMC(gpu_id=None):
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = Y.shape[0]
    noise_precision = 1 / 9.0
    net = get_toy_sym(True, noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
    initializer = mx.init.Uniform(0.07)
    sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
                      sample_num=300000, initializer=initializer, prior_precision=1.0,
                      learning_rate=1E-3, L=10, dev=dev(gpu_id))
Author: luobao-intel | Project: incubator-mxnet | Lines: 12 | Source: bdk_demo.py


Example 9: get_parameters

def get_parameters():
    W_xh = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
    W_hh = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
    b_h = nd.zeros(config.hidden_dim)

    W_hy = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.output_dim))
    b_y = nd.zeros(config.output_dim)

    parameters = [W_xh, W_hh, b_h, W_hy, b_y]
    for parameter in parameters:
        parameter.attach_grad()

    return parameters
Author: dolphinsUnderMoon | Project: HoloXon | Lines: 13 | Source: rnn.py


Example 10: run_mnist_SGD

def run_mnist_SGD(training_num=50000, gpu_id=None):
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    exe, exe_params, _ = SGD(sym=net, dev=dev(gpu_id), data_inputs=data_inputs, X=X, Y=Y,
                             X_test=X_test, Y_test=Y_test,
                             total_iter_num=1000000,
                             initializer=initializer,
                             lr=5E-6, prior_precision=1.0, minibatch_size=100)
Author: luobao-intel | Project: incubator-mxnet | Lines: 13 | Source: bdk_demo.py


Example 11: reset_c2c

 def reset_c2c(self):
   self.select_triplets()
   for identity, v in self.id2range.items():
     _list = range(*v)
     ocontents = []  # raw record strings for this identity
     for idx in _list:
       s = imgrec.read_idx(idx)
       ocontents.append(s)
     embeddings = None
     ba = 0
     while True:
       bb = min(ba + args.batch_size, len(ocontents))
       if ba >= bb:
         break
       _batch_size = bb - ba
       _batch_size2 = max(_batch_size, args.ctx_num)
       data = nd.zeros((_batch_size2, 3, image_size[0], image_size[1]))
       label = nd.zeros((_batch_size2,))
       ii = 0
       for i in range(ba, bb):
         header, img = mx.recordio.unpack(ocontents[i])
         img = mx.image.imdecode(img)
         img = nd.transpose(img, axes=(2, 0, 1))
         data[ii][:] = img
         label[ii][:] = header.label
         ii += 1
       # pad the tail batch by repeating the first sample so every context gets data
       while ii < _batch_size2:
         data[ii][:] = data[0][:]
         label[ii][:] = label[0][:]
         ii += 1
       db = mx.io.DataBatch(data=(data,), label=(label,))
       self.mx_model.forward(db, is_train=False)
       net_out = self.mx_model.get_outputs()[0].asnumpy()
       if embeddings is None:
         embeddings = np.zeros((len(ocontents), net_out.shape[1]))
       embeddings[ba:bb, :] = net_out[0:_batch_size, :]
       ba = bb
     embeddings = sklearn.preprocessing.normalize(embeddings)
     embedding = np.mean(embeddings, axis=0, keepdims=True)  # identity center
     embedding = sklearn.preprocessing.normalize(embedding)
     sims = np.dot(embeddings, embedding.T).flatten()  # cosine similarity to the center
     assert len(sims) == len(_list)
     for i in range(len(_list)):
       _idx = _list[i]
       self.idx2cos[_idx] = sims[i]
Author: bupt-cv | Project: insightface | Lines: 51 | Source: data.py


Example 12: run_mnist_SGLD

def run_mnist_SGLD(training_num=50000):
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
                            X_test=X_test, Y_test=Y_test,
                            total_iter_num=1000000,
                            initializer=initializer,
                            learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
                            thin_interval=100, burn_in_iter_num=1000)
Author: 4ker | Project: mxnet | Lines: 14 | Source: bdk_demo.py


Example 13: run_boston_housing_SGLD

def run_boston_housing_SGLD():
    X, Y, X_test, Y_test = load_boston_housing()
    minibatch_size = 1
    teacher_noise_precision = 1.25
    net = get_boston_housing_sym(True, teacher_noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    initializer = BiasXavier(factor_type="in", magnitude=2.34)
    exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
                            X_test=X_test, Y_test=Y_test,
                            total_iter_num=1000000,
                            initializer=initializer,
                            learning_rate=5E-10, prior_precision=1.0, minibatch_size=minibatch_size,
                            thin_interval=100, burn_in_iter_num=1000, task='boston')
Author: sxjscience | Project: mxnet | Lines: 15 | Source: bdk_demo.py


Example 14: orthonormal_VanillaLSTMBuilder

def orthonormal_VanillaLSTMBuilder(lstm_layers, input_dims, lstm_hiddens, dropout_x=0., dropout_h=0., debug=False):
    """Build a standard LSTM cell, with variational dropout,
    with weights initialized to be orthonormal (https://arxiv.org/abs/1312.6120)

    Parameters
    ----------
    lstm_layers : int
        number of LSTM layers; currently only one layer is supported
    input_dims : int
        word vector dimensions
    lstm_hiddens : int
        hidden size
    dropout_x : float
        dropout on inputs, not used in this implementation, see `biLSTM` below
    dropout_h : float
        dropout on hidden states
    debug : bool
        set to True to skip orthonormal initialization

    Returns
    -------
    lstm_cell : VariationalDropoutCell
        An LSTM cell
    """
    assert lstm_layers == 1, 'only accept one layer lstm'
    W = orthonormal_initializer(lstm_hiddens, lstm_hiddens + input_dims, debug)
    W_h, W_x = W[:, :lstm_hiddens], W[:, lstm_hiddens:]
    b = nd.zeros((4 * lstm_hiddens,))
    b[lstm_hiddens:2 * lstm_hiddens] = -1.0
    lstm_cell = rnn.LSTMCell(input_size=input_dims, hidden_size=lstm_hiddens,
                             i2h_weight_initializer=mx.init.Constant(np.concatenate([W_x] * 4, 0)),
                             h2h_weight_initializer=mx.init.Constant(np.concatenate([W_h] * 4, 0)),
                             h2h_bias_initializer=mx.init.Constant(b))
    wrapper = VariationalDropoutCell(lstm_cell, drop_states=dropout_h)
    return wrapper
Author: hridaydutta123 | Project: gluon-nlp | Lines: 35 | Source: utils.py
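A hedged usage sketch, assuming orthonormal_initializer and VariationalDropoutCell are in scope as in the original utils module (batch size, sequence length, and dimensions below are illustrative):

from mxnet import nd

cell = orthonormal_VanillaLSTMBuilder(1, input_dims=100, lstm_hiddens=200)
cell.initialize()
# Unroll over a dummy batch of 2 sequences, 5 steps each, 100-dim inputs.
outputs, states = cell.unroll(length=5, inputs=nd.zeros((2, 5, 100)),
                              layout='NTC', merge_outputs=True)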


Example 15: init_params

def init_params():
    w = nd.random_normal(scale=1, shape=(num_inputs, 1))
    b = nd.zeros(shape=(1,))
    params = [w, b]
    for param in params:
        param.attach_grad()  # allocate gradient buffers so autograd can write to them
    return params
Author: dyz-zju | Project: MVision | Lines: 7 | Source: 4_regularization_overFitting.py


Example 16: try_gpu

def try_gpu():
    try:
        ctx = mx.gpu()
        _ = nd.zeros((1,), ctx=ctx)  # probe allocation; raises MXNetError when no usable GPU is present
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx
Author: gonglixue | Project: PRML_Python | Lines: 7 | Source: utils.py
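The probe works because nd.zeros actually allocates on the device, so it fails immediately on a machine without a usable GPU. Typical usage:

ctx = try_gpu()
x = nd.zeros((256, 256), ctx=ctx)  # lands on the GPU when one is available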


Example 17: get_feature

def get_feature(name, vid, args):
  global feature_cache
  key = (name,vid)
  if key in feature_cache:
    return feature_cache[key]

  input_dir = os.path.join(args.image_dir, name, str(vid))
  data = nd.zeros((1, 3, image_size[0], image_size[1]))
  F = []
  for img in os.listdir(input_dir):
    img = os.path.join(input_dir, img)
    img = cv2.imread(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.transpose(img, (2,0,1))
    data[0][:] = img
    db = mx.io.DataBatch(data=(data,))
    model.forward(db, is_train=False)
    net_out = model.get_outputs()[0].asnumpy().flatten()
    F.append(net_out)
  F = np.array(F)
  F = sklearn.preprocessing.normalize(F)
  feature = np.mean(F, axis=0, keepdims=True)
  feature = sklearn.preprocessing.normalize(feature).flatten()

  feature_cache[key] = feature
  return feature
Author: LHQ0308 | Project: insightface | Lines: 26 | Source: ytf.py


Example 18: calc_sum

def calc_sum(matA, matB):
    height, width = matA.shape
    matC = nd.zeros(matA.shape, ctx=matA.context)
    for y in range(height):
        for x in range(width):
            matC[y, x] = matA[y, x] + matB[y, x]
    return matC
Author: z01nl1o02 | Project: tests | Lines: 7 | Source: test.py
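The element-by-element loop above illustrates indexing; the same result is a single vectorized operator, which avoids height*width scalar writes (a drop-in equivalent for the same shapes):

def calc_sum_vectorized(matA, matB):
    # Elementwise addition dispatches one kernel on matA's context.
    return matA + matB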


Example 19: gan_loss

 def gan_loss(input, target_is_real):
     if target_is_real:
         target = nd.ones(input.shape, ctx=input.context)
     else:
         target = nd.zeros(input.shape, ctx=input.context)
     # MSE loss for LSGAN: per-sample mean over all non-batch axes
     e = ((input - target) ** 2).mean(axis=0, exclude=True)
     return e
Author: xiayongtao | Project: gluon-cv | Lines: 8 | Source: train_cgan.py
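For illustration, treating gan_loss above as a free function (in the project it is a method), the LSGAN discriminator loss over a batch of real and fake scores would combine both targets (shapes below are illustrative):

d_real = nd.random.uniform(shape=(8, 1))   # discriminator outputs on real images
d_fake = nd.random.uniform(shape=(8, 1))   # discriminator outputs on generated images
loss_d = gan_loss(d_real, True) + gan_loss(d_fake, False)  # per-sample losses, shape (8,)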


Example 20: try_gpu

def try_gpu():
    """If a GPU is available, return mx.gpu(0); otherwise return mx.cpu()."""
    try:
        ctx = mx.gpu()
        _ = nd.zeros((1,), ctx=ctx)  # probe allocation; raises MXNetError when no usable GPU is present
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx
Author: antiBoson | Project: gluon-tutorials-zh | Lines: 8 | Source: utils.py



Note: the mxnet.ndarray.zeros examples in this article were collected from open-source projects on GitHub and similar code-hosting platforms; copyright in each snippet remains with its original author. Consult the corresponding project's license before reusing or redistributing the code.

