
Python mxnet.cpu Function Code Examples


This article collects typical usage examples of the mxnet.cpu function in Python. If you are wondering how exactly to call the cpu function, what its arguments look like, or what working examples of it look like in practice, the curated code samples below may help.



A total of 20 code examples of the cpu function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
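As a quick orientation before the collected examples, here is a minimal sketch (not taken from any of the projects below) of what mx.cpu does: it returns a CPU Context object that tells MXNet where to allocate arrays and run computation, with mx.gpu as the GPU counterpart. The examples assume the usual imports (import mxnet as mx, import numpy as np), which are omitted in the snippets.

import mxnet as mx
import numpy as np

# mx.cpu(device_id=0) returns a CPU context; for CPUs the device id mainly
# serves to distinguish "devices" when parameters are assigned to context
# groups, as several examples below do with mx.cpu(1), mx.cpu(2), etc.
ctx = mx.cpu()

# Place an NDArray on that context and run a small computation there.
a = mx.nd.array(np.ones((2, 3)), ctx=ctx)
b = (a * 2).asnumpy()
print(ctx, b.shape)  # cpu(0) (2, 3)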

Example 1: test_convolution_grouping

def test_convolution_grouping():
    num_filter = 4
    num_group = 2
    kernel = (3, 3)
    shape = (1, 4, 9, 9)

    x = mx.sym.Variable('x')
    w = mx.sym.Variable('w')
    b = mx.sym.Variable('b')
    y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
    xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
    wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
    bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
    y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
                                            num_filter=num_filter//num_group, kernel=kernel)
                       for i in range(num_group)])

    exe1 = y1.simple_bind(mx.cpu(), x=shape)
    exe2 = y2.simple_bind(mx.cpu(), x=shape, w=(num_filter, shape[1]//num_group, kernel[0], kernel[1]), b=(num_filter,))
    for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
        arr1[:] = np.random.normal(size=arr1.shape)
        arr2[:] = arr1
    exe1.forward(is_train=True)
    exe1.backward(exe1.outputs[0])
    exe2.forward(is_train=True)
    exe2.backward(exe2.outputs[0])

    for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
        np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3)
Developer: 1132520084, Project: mxnet, Lines: 29, Source: test_operator.py


Example 2: test_paramdict

def test_paramdict():
    params = gluon.ParameterDict('net_')
    params.get('weight', shape=(10, 10))
    assert list(params.keys()) == ['net_weight']
    params.initialize(ctx=mx.cpu())
    params.save('test.params')
    params.load('test.params', mx.cpu())
Developer: yzhang87, Project: mxnet, Lines: 7, Source: test_nn.py


Example 3: test_elementwisesum_with_type

def test_elementwisesum_with_type():
    sym = mx.sym.ElementWiseSum(name="ews", num_args=2)
    ctx_list = [
        {
            "ctx": mx.gpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float64, "ews_arg1": np.float64},
        },
        {
            "ctx": mx.gpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float32, "ews_arg1": np.float32},
        },
        {
            "ctx": mx.gpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float16, "ews_arg1": np.float16},
        },
        {
            "ctx": mx.cpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float64, "ews_arg1": np.float64},
        },
        {
            "ctx": mx.cpu(0),
            "ews_arg1": (2, 10),
            "ews_arg0": (2, 10),
            "type_dict": {"ews_arg0": np.float32, "ews_arg1": np.float32},
        },
    ]
    check_consistency(sym, ctx_list)
Developer: alextnewman, Project: mxnet, Lines: 35, Source: test_operator_gpu.py


Example 4: test_concat_with_type

def test_concat_with_type():
    sym = mx.sym.Concat(name="concat", num_args=2)
    ctx_list = [
        {
            "ctx": mx.gpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float64, "concat_arg1": np.float64},
        },
        {
            "ctx": mx.gpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float32, "concat_arg1": np.float32},
        },
        {
            "ctx": mx.gpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float16, "concat_arg1": np.float16},
        },
        {
            "ctx": mx.cpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float64, "concat_arg1": np.float64},
        },
        {
            "ctx": mx.cpu(0),
            "concat_arg1": (2, 10),
            "concat_arg0": (2, 10),
            "type_dict": {"concat_arg0": np.float32, "concat_arg1": np.float32},
        },
    ]
    check_consistency(sym, ctx_list)
Developer: alextnewman, Project: mxnet, Lines: 35, Source: test_operator_gpu.py


Example 5: run

	def run(self):
		data_validate = mx.io.CSVIter(data_csv="../validate-64x64-data.csv", data_shape=(30, 64, 64), batch_size=1)
		network = get_lenet()
		batch_size = 32
		devs = [mx.cpu(0), mx.cpu(0), mx.cpu(0), mx.cpu(0)]  # distribute work across multiple CPU contexts
		data_train = mx.io.CSVIter(data_csv=self.input()['data'].path, data_shape=(30, 64, 64),
				label_csv=self.input()['label'].path, label_shape=(600,), batch_size=batch_size)


		print "\n%d epochs\n" % self.tune_epoch()
		model = mx.model.FeedForward(ctx=devs,
				symbol             = network,
				num_epoch          = self.tune_epoch(),
				learning_rate      = 0.001,
				wd                 = 0.00001,
				momentum           = 0.9)

		model.fit(X=data_train, eval_metric = mx.metric.np(CRPS))
		prob = model.predict(data_validate)
		prob_fname = "%s_prob" % self.name
		try:
			np.save(prob_fname, prob)
		except Exception:
			pickle.dump(prob, open(prob_fname + '.p', 'wb'))

		pickle.dump(model, open(self.output().path, 'wb'))
Developer: polhooper, Project: KDSB_2015, Lines: 26, Source: Train.py


Example 6: test_load_000800

def test_load_000800():
    with mx.AttrScope(ctx_group='stage1'):
        data = mx.symbol.Variable('data', lr_mult=0.2)
        weight = mx.sym.Variable(name='fc1_weight', lr_mult=1.2)
        fc1  = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128, wd_mult=0.3)
        act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")

    set_stage1 = set(act1.list_arguments())
    with mx.AttrScope(ctx_group='stage2'):
        fc2  = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64, lr_mult=0.01)
        act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
        fc3  = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
        fc3 = mx.symbol.BatchNorm(fc3, name='batchnorm0')
        sym1  = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')

    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    sym2 = mx.sym.load(os.path.join(curr_path, 'save_000800.json'))

    attr1 = sym1.attr_dict()
    attr2 = sym2.attr_dict()
    for k, v1 in attr1.items():
        assert k in attr2, k
        v2 = attr2[k]
        for kk, vv1 in v1.items():
            if kk.startswith('__') and kk.endswith('__'):
                assert kk in v2 and v2[kk] == vv1, k + str(v1) + str(v2)

    check_symbol_consistency(sym1, sym2,
        {'ctx': mx.cpu(0), 'group2ctx': {'stage1' : mx.cpu(1), 'stage2' : mx.cpu(2)}, 'data': (1,200)})
Developer: csgcmai, Project: mxnet, Lines: 29, Source: test_symbol.py


Example 7: validate

def validate(val_data, val_dataset, net, ctx):
    if isinstance(ctx, mx.Context):
        ctx = [ctx]

    val_metric.reset()

    from tqdm import tqdm
    for batch in tqdm(val_data):
        data, scale, center, score, imgid = val_batch_fn(batch, ctx)

        outputs = [net(X) for X in data]
        if opt.flip_test:
            data_flip = [nd.flip(X, axis=3) for X in data]
            outputs_flip = [net(X) for X in data_flip]
            outputs_flipback = [flip_heatmap(o, val_dataset.joint_pairs, shift=True) for o in outputs_flip]
            outputs = [(o + o_flip)/2 for o, o_flip in zip(outputs, outputs_flipback)]

        if len(outputs) > 1:
            outputs_stack = nd.concat(*[o.as_in_context(mx.cpu()) for o in outputs], dim=0)
        else:
            outputs_stack = outputs[0].as_in_context(mx.cpu())

        preds, maxvals = get_final_preds(outputs_stack, center.asnumpy(), scale.asnumpy())
        val_metric.update(preds, maxvals, score, imgid)

    res = val_metric.get()
    return res
Developer: xiayongtao, Project: gluon-cv, Lines: 27, Source: validate.py


Example 8: test_convolution_with_type

def test_convolution_with_type():
    np.random.seed(1234)
    sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')

    data = mx.sym.Variable('conv_data')
    w = mx.sym.Variable('conv_weight')
    b = mx.sym.Variable('conv_bias')
    w = mx.sym.transpose(w, axes=(0,2,3,1))
    sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
    sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
    sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')

    sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                # NHWC
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
                 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
                ]
    # wider tolerance needed for true-fp16 NCHW test above
    tol = {np.dtype(np.float16): 0.5,
               np.dtype(np.float32): 1e-3,
               np.dtype(np.float64): 1e-5,
               np.dtype(np.uint8): 0,
               np.dtype(np.int32): 0}
    check_consistency(sym, ctx_list, tol=tol)
    # test ability to turn off training on bias
    check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
Developer: moveforever, Project: mxnet, Lines: 33, Source: test_operator_gpu.py


Example 9: test_save_load

def test_save_load():
    net = mx.gluon.model_zoo.vision.get_resnet(1, 18, pretrained=True)
    net.save_parameters('test_save_load.params')

    net = mx.gluon.model_zoo.vision.get_resnet(1, 18)
    net.output = mx.gluon.nn.Dense(1000)

    net.load_parameters('test_save_load.params')

    class Network(gluon.Block):
        def __init__(self, **kwargs):
            super(Network, self).__init__(**kwargs)
            with self.name_scope():
                self.encoders = gluon.nn.Sequential()
                with self.encoders.name_scope():
                    for _ in range(2):
                        lstm = mx.gluon.rnn.LSTM(200, 1, bidirectional=True)
                        self.encoders.add(lstm)

        def forward(self, x):
            for i in range(2):
                x = self.encoders[i](x)
            return x
    net = Network()
    net.initialize(mx.init.Xavier(), ctx=mx.cpu())
    net.hybridize()
    x = np.random.rand(32, 10, 10)
    x = mx.nd.array(x).as_in_context(mx.cpu())
    net(x)
    net.save_parameters('tmp.params')
    net2 = Network()
    net2.load_parameters('tmp.params')
Developer: zwz173131329, Project: incubator-mxnet, Lines: 32, Source: test_gluon.py


Example 10: test_parameter_sharing

def test_parameter_sharing():
    class Net(gluon.Block):
        def __init__(self, in_units=0, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(5, in_units=in_units)
                self.dense1 = nn.Dense(5, in_units=in_units)

        def forward(self, x):
            return self.dense1(self.dense0(x))

    net1 = Net(prefix='net1_', in_units=5)
    net2 = Net(prefix='net2_', params=net1.collect_params())
    net1.collect_params().initialize()
    net2(mx.nd.zeros((3, 5)))

    net1.save_parameters('net1.params')

    net3 = Net(prefix='net3_')
    net3.load_parameters('net1.params', mx.cpu())

    net4 = Net(prefix='net4_')
    net5 = Net(prefix='net5_', in_units=5, params=net4.collect_params())
    net4.collect_params().initialize()
    net5(mx.nd.zeros((3, 5)))

    net4.save_parameters('net4.params')

    net6 = Net(prefix='net6_')
    net6.load_parameters('net4.params', mx.cpu())
Developer: zwz173131329, Project: incubator-mxnet, Lines: 30, Source: test_gluon.py


Example 11: test_module_ctx_group

def test_module_ctx_group():
    with mx.AttrScope(ctx_group='dev1'):
        a = mx.symbol.Variable('a')
        a = a * 2
    with mx.AttrScope(ctx_group='dev2'):
        b = mx.symbol.Variable('b')
        c = a + b
    shape = (2, 5)
    mod1 = mx.mod.Module(c, context=[mx.cpu(0)], data_names=['a', 'b'], label_names=None,
                         group2ctxs=[{'dev1':mx.cpu(1),'dev2':mx.cpu(2)}])
    mod1.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
    mod1.init_params()
    mod1.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
    mod1.backward([mx.nd.ones(shape)])
    mod1_input_grads = mod1.get_input_grads()

    mod2 = mx.mod.Module(c, data_names=['a', 'b'], label_names=None)
    mod2.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
    mod2.init_params()
    mod2.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
    mod2.backward([mx.nd.ones(shape)])
    mod2_input_grads = mod2.get_input_grads()

    assert np.all(mod1_input_grads[0].asnumpy() == mod2_input_grads[0].asnumpy())
    assert np.all(mod1_input_grads[1].asnumpy() == mod2_input_grads[1].asnumpy())
Developer: GrassSunFlower, Project: mxnet, Lines: 25, Source: test_module.py


Example 12: test_activation_with_type

def test_activation_with_type():
    sym = mx.sym.Activation(name='act', act_type='sigmoid')
    ctx_list = [{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
                {'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},
                {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
                {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}}]
    check_consistency(sym, ctx_list)
Developer: adavanisanti, Project: mxnet, Lines: 7, Source: test_operator_gpu.py


Example 13: test_fullyconnected_with_type

def test_fullyconnected_with_type():
    sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
    ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
                {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
                {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
                {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
    check_consistency(sym, ctx_list)
Developer: adavanisanti, Project: mxnet, Lines: 7, Source: test_operator_gpu.py


Example 14: test_convolution_with_type

def test_convolution_with_type():
    sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
    ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}]
    check_consistency(sym, ctx_list)
Developer: adavanisanti, Project: mxnet, Lines: 7, Source: test_operator_gpu.py


Example 15: test_ctx_group

def test_ctx_group():
    with mx.AttrScope(ctx_group='stage1'):
        data = mx.symbol.Variable('data')
        fc1  = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
        act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")

    set_stage1 = set(act1.list_arguments())
    with mx.AttrScope(ctx_group='stage2'):
        fc2  = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
        act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
        fc3  = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
        fc3 = mx.symbol.BatchNorm(fc3)
        mlp  = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')

    set_stage2 = set(mlp.list_arguments()) - set_stage1

    group2ctx = {
        'stage1' : mx.cpu(1),
        'stage2' : mx.cpu(2)
    }

    texec = mlp.simple_bind(mx.cpu(0),
                            group2ctx=group2ctx,
                            data=(1,200))

    for arr, name in zip(texec.arg_arrays, mlp.list_arguments()):
        if name in set_stage1:
            assert arr.context == group2ctx['stage1']
        else:
            assert arr.context == group2ctx['stage2']
Developer: green-dalii, Project: incubator-mxnet, Lines: 30, Source: test_multi_device_exec.py


Example 16: test_module_states

def test_module_states():
    stack = mx.rnn.SequentialRNNCell()
    for i in range(2):
        stack.add(mx.rnn.LSTMCell(num_hidden=20, prefix='lstm_l%d_'%i))
    begin_state = stack.begin_state(func=mx.sym.Variable)
    _, states = stack.unroll(10, begin_state=begin_state, inputs=mx.sym.Variable('data'))

    state_names = [i.name for i in begin_state]
    mod = mx.mod.Module(mx.sym.Group(states), context=[mx.cpu(0), mx.cpu(1)],
                        label_names=None, state_names=state_names)
    mod.bind(data_shapes=[('data', (5, 10))], label_shapes=None, for_training=False)
    mod.init_params()
    batch = mx.io.DataBatch(data=[mx.nd.zeros((5, 10))], label=[])

    mod.set_states(value=1)
    mod.forward(batch)
    out = mod.get_outputs(merge_multi_context=False)
    out1 = mod.get_outputs(merge_multi_context=True)

    mod.set_states(states=out)
    mod.forward(batch)
    out2 = mod.get_outputs(merge_multi_context=True)

    for x1, x2 in zip(out1, out2):
        assert not mx.test_utils.almost_equal(x1.asnumpy(), x2.asnumpy(), rtol=1e-3)
Developer: UniKrau, Project: incubator-mxnet, Lines: 25, Source: test_module.py


Example 17: test_bucket_module_ctx_group

def test_bucket_module_ctx_group():
    num_hidden = 10
    batch_size = 5
    def sym_gen(seq_len):
        with mx.AttrScope(ctx_group='dev1'):
            data = mx.symbol.Variable('data')
            weight = mx.symbol.Variable('dev1_weight')
            bias = mx.symbol.Variable('dev1_bias')
            fc = data
            for i in range(seq_len):
                fc  = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
                                               name='dev1_fc_%d' % i, num_hidden=num_hidden)
        with mx.AttrScope(ctx_group='dev2'):
            label = mx.symbol.Variable('label')
            weight = mx.symbol.Variable('dev2_weight')
            bias = mx.symbol.Variable('dev2_bias')
            for i in range(seq_len):
                fc  = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
                                               name='dev2_fc_%d' % i, num_hidden=num_hidden)
            sym = mx.symbol.SoftmaxOutput(fc, label, name='softmax')

        return sym, ('data',), ('label',)

    mod = mx.mod.BucketingModule(sym_gen=sym_gen, default_bucket_key=10, context=[mx.cpu(0)],
                                 group2ctxs=[{'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}])
    mod.bind(data_shapes=[['data', (batch_size, num_hidden)]],
             label_shapes=[['label', (batch_size,)]],
             for_training=True, inputs_need_grad=True)
    assert(mod.binded)
Developer: UniKrau, Project: incubator-mxnet, Lines: 29, Source: test_module.py


Example 18: test_module_reshape

def test_module_reshape():
    data = mx.sym.Variable('data')
    sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')

    dshape = (7, 20)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[('data', dshape)])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 1})

    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -1).all()

    dshape = (14, 20)
    mod.reshape(data_shapes=[('data', dshape)])
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -3).all()
Developer: UniKrau, Project: incubator-mxnet, Lines: 25, Source: test_module.py


Example 19: run_synthetic_SGLD

def run_synthetic_SGLD():
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
                                 factor=0.55)
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
Developer: luobao-intel, Project: incubator-mxnet, Lines: 34, Source: bdk_demo.py


Example 20: __init__

    def __init__(self,
                 seq_len,
                 input_size,
                 num_hidden,
                 num_embed,
                 num_label,
                 arg_params,
                 ctx=mx.cpu(),
                 dropout=0.):
        self.sym = bi_lstm_inference_symbol(input_size, seq_len,
                                            num_hidden,
                                            num_embed,
                                            num_label,
                                            dropout)
        batch_size = 1
        init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(2)]
        init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(2)]
        
        data_shape = [("data", (batch_size, seq_len, ))]

        input_shapes = dict(init_c + init_h + data_shape)
        self.executor = self.sym.simple_bind(ctx=mx.cpu(), **input_shapes)

        for key in self.executor.arg_dict.keys():
            if key in arg_params:
                arg_params[key].copyto(self.executor.arg_dict[key])

        state_name = []
        for i in range(2):
            state_name.append("l%d_init_c" % i)
            state_name.append("l%d_init_h" % i)

        self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
        self.input_arr = mx.nd.zeros(data_shape[0][1])
Developer: 1132520084, Project: mxnet, Lines: 34, Source: rnn_model.py



Note: The mxnet.cpu function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.

