
Python test_utils.assert_almost_equal Function Code Examples


This article collects typical usage examples of the Python function mxnet.test_utils.assert_almost_equal. If you are wondering what assert_almost_equal does, how to call it, or what real-world uses look like, the curated code examples below should help.



A total of 20 assert_almost_equal code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
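Before the examples, here is a minimal standalone sketch of a typical assert_almost_equal call. The array values and tolerances are illustrative only and do not come from any of the projects listed below:

import numpy as np
import mxnet as mx
from mxnet.test_utils import assert_almost_equal

# Two arrays that agree within the tolerances below.
expected = np.array([1.0, 2.0, 3.0], dtype=np.float32)
actual = mx.nd.array([1.0, 2.0, 3.0 + 1e-6]).asnumpy()

# Passes silently when the arrays are close (roughly |a - b| <= atol + rtol * |b|
# elementwise); raises an AssertionError describing the worst mismatch otherwise.
assert_almost_equal(actual, expected, rtol=1e-5, atol=1e-7)

Most of the examples that follow use the same pattern: convert MXNet NDArrays to NumPy arrays with .asnumpy() and pass them to assert_almost_equal together with rtol/atol tolerances appropriate for the dtype being tested.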

Example 1: check_quantized_pooling

    def check_quantized_pooling(data_shape, kernel, pool_type, pad, stride, global_pool):
        with mx.Context('gpu', 0):
            data = mx.sym.Variable(name='data', shape=data_shape, dtype='float32')
            pooling_fp32 = mx.sym.Pooling(data=data, kernel=kernel, pad=pad, stride=stride,
                                          pool_type=pool_type, global_pool=global_pool, cudnn_off=False)
            arg_shapes, _, _ = pooling_fp32.infer_shape(data=data_shape)
            arg_names = pooling_fp32.list_arguments()
            pooling_fp32_exe = pooling_fp32.simple_bind(ctx=mx.current_context(), grad_req='null')
            pooling_fp32_exe.arg_dict[arg_names[0]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                              shape=data_shape).astype('int32')
            output = pooling_fp32_exe.forward()[0]

            qdata = mx.sym.Variable(name='qdata', shape=data_shape, dtype='int8')
            min_data = mx.sym.Variable(name='min_data')
            max_data = mx.sym.Variable(name='max_data')
            quantized_pooling = mx.sym.contrib.quantized_pooling(data=qdata, min_data=min_data,
                                                                 max_data=max_data, kernel=kernel,
                                                                 pad=pad, stride=stride, pool_type=pool_type,
                                                                 global_pool=global_pool)
            pooling_int8_exe = quantized_pooling.simple_bind(ctx=mx.current_context(), grad_req='null')
            qarg_names = quantized_pooling.list_arguments()
            pooling_int8_exe.arg_dict[qarg_names[0]][:] = pooling_fp32_exe.arg_dict[arg_names[0]].astype('int8')
            quantized_range = 127.0
            pooling_int8_exe.arg_dict[qarg_names[1]][:] = -quantized_range
            pooling_int8_exe.arg_dict[qarg_names[2]][:] = quantized_range
            qoutput, min_range, max_range = pooling_int8_exe.forward()

            if pool_type == 'max':
                assert_almost_equal(output.asnumpy(), qoutput.asnumpy())
            elif pool_type == 'avg':  # for avg pooling, fp32 and int8 may be different due to rounding errors
                diff = mx.nd.abs(output - qoutput.astype(output.dtype))
                cond = mx.nd.lesser(2, diff).sum().asscalar()
                assert cond == 0
Developer: CoderHHX | Project: incubator-mxnet | Lines: 33 | Source: test_quantization.py


Example 2: test_gnmt_encoder

def test_gnmt_encoder():
    ctx = mx.Context.default_ctx
    for cell_type in ["lstm", "gru", "relu_rnn", "tanh_rnn"]:
        for num_layers, num_bi_layers in [(2, 1), (3, 0)]:
            for use_residual in [False, True]:
                encoder = GNMTEncoder(cell_type=cell_type, num_layers=num_layers,
                                      num_bi_layers=num_bi_layers, hidden_size=8,
                                      dropout=0.0, use_residual=use_residual,
                                      prefix='gnmt_encoder_')
                encoder.initialize(ctx=ctx)
                encoder.hybridize()
                for batch_size in [4]:
                    for seq_length in [5, 10]:
                        inputs_nd = mx.nd.random.normal(0, 1, shape=(batch_size, seq_length, 4), ctx=ctx)
                        valid_length_nd = mx.nd.array(np.random.randint(1, seq_length,
                                                                        size=(batch_size,)), ctx=ctx)
                        encoder_outputs, _ = encoder(inputs_nd, valid_length=valid_length_nd)
                        valid_length_npy = valid_length_nd.asnumpy()
                        rnn_output = encoder_outputs[0].asnumpy()
                        for i in range(batch_size):
                            if valid_length_npy[i] < seq_length - 1:
                                padded_out = rnn_output[i, int(valid_length_npy[i]):, :]
                                assert_almost_equal(padded_out, np.zeros_like(padded_out), 1E-6, 1E-6)
                        assert(encoder_outputs[0].shape == (batch_size, seq_length, 8))
                        assert(len(encoder_outputs[1]) == num_layers)
Developer: hridaydutta123 | Project: gluon-nlp | Lines: 25 | Source: test_encoder_decoder.py


Example 3: test_activations

def test_activations():
    point_to_validate = mx.nd.array([-0.1, 0.1] * 3)

    swish = mx.gluon.nn.Swish()
    def swish_test(x):
        return x * mx.nd.sigmoid(x)

    for test_point, ref_point in zip(swish_test(point_to_validate), swish(point_to_validate)):
        assert test_point == ref_point

    elu = mx.gluon.nn.ELU()
    def elu_test(x):
        def elu(x):
            return 1.0 * (mx.nd.exp(x) - 1) if x < 0 else x
        return [elu(x_i) for x_i in x]

    for test_point, ref_point in zip(elu_test(point_to_validate), elu(point_to_validate)):
        assert test_point == ref_point

    selu = mx.gluon.nn.SELU()
    def selu_test(x):
        def selu(x):
            scale, alpha = 1.0507009873554804934193349852946, 1.6732632423543772848170429916717
            return scale * x if x >= 0 else alpha * mx.nd.exp(x) - alpha
        return [selu(x_i) for x_i in x]

    for test_point, ref_point in zip(selu_test(point_to_validate), selu(point_to_validate)):
        assert test_point == ref_point

    prelu = mx.gluon.nn.PReLU()
    prelu.initialize()
    x = point_to_validate.reshape((1, 3, 2))
    assert_almost_equal(prelu(x).asnumpy(), mx.nd.where(x >= 0, x, 0.25 * x).asnumpy())
Developer: zwz173131329 | Project: incubator-mxnet | Lines: 33 | Source: test_gluon.py


Example 4: check_hybrid_static_memory

def check_hybrid_static_memory(**kwargs):
    x = mx.nd.random.uniform(shape=(2, 3, 32, 32))
    x.attach_grad()

    net1 = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, prefix='net_', ctx=mx.context.current_context())
    net2 = gluon.model_zoo.vision.get_resnet(
        1, 18, pretrained=True, prefix='net_', ctx=mx.context.current_context())
    net2.hybridize(**kwargs)
    net1(x)
    net2(x)

    def test(net, x):
        with mx.autograd.record():
            y = net(x) + net(x)
            y.backward()

        grads = {k: v.grad() for k, v in net.collect_params().items() if v.grad_req != 'null'}

        return y, grads

    y1, grads1 = test(net1, x)
    y2, grads2 = test(net2, x)

    assert_almost_equal(y1.asnumpy(), y2.asnumpy(), rtol=1e-3, atol=1e-5)
    for key in grads1:
        assert_almost_equal(grads1[key].asnumpy(), grads2[key].asnumpy(), rtol=1e-3, atol=1e-5)
Developer: zwz173131329 | Project: incubator-mxnet | Lines: 27 | Source: test_gluon.py


Example 5: pull_init_test

 def pull_init_test(kv):
     # checks that compression is not applied to init of key
     out = [mx.nd.zeros(shapes[0], mx.gpu(g)) for g in range(nworker)]
     kv.pull(gc_init_test_key, out=out)
     exp = np.ones_like(out[0].asnumpy())
     for o in out:
         assert_almost_equal(o.asnumpy(), exp)
Developer: GrassSunFlower | Project: mxnet | Lines: 7 | Source: test_kvstore.py


Example 6: check_rsp_pull

        def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
            count = len(ctxs)
            num_rows = shape[0]
            row_ids = []
            all_row_ids = np.arange(num_rows)
            vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
            if is_same_rowid:
                row_id = np.random.randint(num_rows, size=num_rows)
                row_ids = [mx.nd.array(row_id)] * count
            elif use_slice:
                total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
                row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
            else:
                for i in range(count):
                    row_id = np.random.randint(num_rows, size=num_rows)
                    row_ids.append(mx.nd.array(row_id))
            row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
            vals_to_pull = vals[0] if len(vals) == 1 else vals

            kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
            for val, row_id in zip(vals, row_ids):
                retained = val.asnumpy()
                excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
                for row in range(num_rows):
                    expected_val = np.zeros_like(retained[row])
                    expected_val += 0 if row in excluded_row_ids else 2
                    assert_almost_equal(retained[row], expected_val)

            if sparse_pull is True:
                kv.pull('e', out=vals_to_pull, ignore_sparse=False)
                for val in vals:
                    retained = val.asnumpy()
                    expected_val = np.zeros_like(retained)
                    expected_val[:] = 2
                    assert_almost_equal(retained, expected_val)
Developer: dpom | Project: incubator-mxnet | Lines: 35 | Source: test_kvstore_gpu.py


Example 7: check_compr_random

    def check_compr_random(kv, threshold, nworker):
        # set a seed so all workers generate same data. knowing this helps
        # calculate expected value after pull
        mx.random.seed(123)
        rnd.seed(123)
        nrepeat = 5
        compr_random_keys_shapes = [('2121', shape),('212221',irregular_shape),('21221', big_shape)]
        # use new keys so residual is 0 for calculation of expected
        for k,s in compr_random_keys_shapes:
            kv.init(k, mx.nd.zeros(s))
        for k,s in compr_random_keys_shapes:
            curr_residual = np.zeros(s)
            for l in range(nrepeat):
                orig_val = mx.nd.zeros(s)
                kv.pull(k, orig_val)

                grad = mx.nd.array(rnd.rand(s[0], s[1]))
                # creates a copy because push changes grad because of assignment
                grad_cpy = mx.nd.array(grad)
                kv.push(k, grad)
                val = mx.nd.zeros(s)
                kv.pull(k, val)

                diff = val - orig_val

                # compute expected by using simulation of operator
                compr, curr_residual, decompr = compute_expected_2bit_quantization(grad_cpy, curr_residual, threshold)
                decompr *= nworker * rate
                assert_almost_equal(diff.asnumpy(), decompr)
Developer: GrassSunFlower | Project: mxnet | Lines: 29 | Source: dist_sync_kvstore.py


Example 8: test_req

def test_req():
    data = mx.nd.random.uniform(shape=(1,3,224,224))
    label = mx.nd.random.uniform(shape=(1))
    label[:] = 1
    loss = gluon.loss.SoftmaxCrossEntropyLoss()

    net = nn.HybridSequential()
    net1 = nn.HybridSequential()
    net1.add(nn.Dense(4))
    net2 = nn.HybridSequential()
    net2.add(nn.Dense(3))
    net2.add(nn.Dense(2))
    net.add(net1)
    net.add(net2)
    net.initialize()

    net.hybridize()

    for v in net.collect_params().values():
        v.grad_req = 'add'

    net.collect_params().zero_grad()
    with mx.autograd.record():
        pred = net(data)
        l = loss(pred, label)
        l.backward()
        grad = net[0][0].weight.grad().mean().asnumpy()
        # run twice to check req = add
        pred = net(data)
        l = loss(pred, label)
        l.backward()

    grad_double = net[0][0].weight.grad().mean().asnumpy()
    assert_almost_equal(grad * 2, grad_double)
Developer: zwz173131329 | Project: incubator-mxnet | Lines: 34 | Source: test_gluon.py


Example 9: check_with_uniform

def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
    """check function consistency with uniform random numbers"""
    if isinstance(arg_shapes, int):
        assert dim
        shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
        arg_shapes = [shape] * arg_shapes
    for dtype in type_list:
        ndarray_arg = []
        numpy_arg = []
        for s in arg_shapes:
            npy = np.random.uniform(rmin, 10, s).astype(dtype)
            narr = mx.nd.array(npy, dtype=dtype)
            ndarray_arg.append(narr)
            numpy_arg.append(npy)
        out1 = uf(*ndarray_arg)
        if npuf is None:
            out2 = uf(*numpy_arg).astype(dtype)
        else:
            out2 = npuf(*numpy_arg).astype(dtype)

        assert out1.shape == out2.shape
        if isinstance(out1, mx.nd.NDArray):
            out1 = out1.asnumpy()
        if dtype == np.float16:
            assert_almost_equal(out1, out2, rtol=2e-3)
        else:
            assert_almost_equal(out1, out2)
Developer: bhuWenDongchao | Project: incubator-mxnet | Lines: 27 | Source: test_ndarray.py


Example 10: check_quantized_conv

    def check_quantized_conv(data_shape, kernel, num_filter, pad, stride, no_bias):
        with mx.Context('gpu', 0):
            # run fp32 conv
            data = mx.sym.Variable(name='data', shape=data_shape, dtype='float32')
            conv2d = mx.sym.Convolution(data=data, kernel=kernel, num_filter=num_filter, pad=pad, stride=stride,
                                        no_bias=no_bias, cudnn_off=False, name='conv2d')
            arg_shapes, _, _ = conv2d.infer_shape(data=data_shape)
            arg_names = conv2d.list_arguments()
            conv_exe_fp32 = conv2d.simple_bind(ctx=mx.current_context(), grad_req='null')
            conv_exe_fp32.arg_dict[arg_names[0]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                           shape=data_shape).astype('int32')
            conv_exe_fp32.arg_dict[arg_names[1]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                           shape=arg_shapes[1]).astype('int32')
            if not no_bias:
                conv_exe_fp32.arg_dict[arg_names[2]][:] = mx.nd.random.uniform(low=-127.0, high=127.0,
                                                                               shape=arg_shapes[2]).astype('int32')
            output = conv_exe_fp32.forward()[0]

            # run quantized conv
            qdata = mx.sym.Variable(name='qdata', shape=data_shape, dtype='int8')
            qweight = mx.sym.Variable(name='qweight', dtype='int8')
            min_data = mx.sym.Variable(name='min_data')
            max_data = mx.sym.Variable(name='max_data')
            min_weight = mx.sym.Variable(name='min_weight')
            max_weight = mx.sym.Variable(name='max_weight')
            quantized_conv2d = mx.sym.contrib.quantized_conv(data=qdata, weight=qweight, min_data=min_data,
                                                             max_data=max_data, min_weight=min_weight,
                                                             max_weight=max_weight, kernel=kernel,
                                                             num_filter=num_filter, pad=pad, stride=stride,
                                                             no_bias=no_bias)
            qarg_names = quantized_conv2d.list_arguments()
            type_dict = None
            if not no_bias:
                type_dict = {qarg_names[2]: 'int8'}
            conv_exe_int8 = quantized_conv2d.simple_bind(ctx=mx.current_context(), type_dict=type_dict, grad_req='null')
            conv_exe_int8.arg_dict[qarg_names[0]][:] = conv_exe_fp32.arg_dict[arg_names[0]].astype('int8')
            conv_exe_int8.arg_dict[qarg_names[1]][:] = conv_exe_fp32.arg_dict[arg_names[1]].astype('int8')
            quantized_range = 127.0
            if no_bias:
                conv_exe_int8.arg_dict[qarg_names[2]][:] = -quantized_range
                conv_exe_int8.arg_dict[qarg_names[3]][:] = quantized_range
                conv_exe_int8.arg_dict[qarg_names[4]][:] = -quantized_range
                conv_exe_int8.arg_dict[qarg_names[5]][:] = quantized_range
            else:
                conv_exe_int8.arg_dict[qarg_names[2]][:] = conv_exe_fp32.arg_dict[arg_names[2]].astype('int8')
                conv_exe_int8.arg_dict[qarg_names[3]][:] = -quantized_range
                conv_exe_int8.arg_dict[qarg_names[4]][:] = quantized_range
                conv_exe_int8.arg_dict[qarg_names[5]][:] = -quantized_range
                conv_exe_int8.arg_dict[qarg_names[6]][:] = quantized_range
                conv_exe_int8.arg_dict[qarg_names[7]][:] = -quantized_range
                conv_exe_int8.arg_dict[qarg_names[8]][:] = quantized_range
            qoutput, min_range, max_range = conv_exe_int8.forward()

            if no_bias:
                assert_almost_equal(output.asnumpy(), qoutput.asnumpy())
            else:
                # with adding bias, accuracy loss should not be greater than one
                diff = mx.nd.abs(output - qoutput.astype(output.dtype))
                cond = mx.nd.lesser(2, diff).sum().asscalar()
                assert cond == 0
Developer: CoderHHX | Project: incubator-mxnet | Lines: 60 | Source: test_quantization.py


Example 11: test_smooth_distribution

def test_smooth_distribution():
    assert_exception(lambda: mx.contrib.quant._smooth_distribution(np.zeros((2,)), eps=1e-3), ValueError)
    dirac_delta = np.zeros((5,))
    dirac_delta[2] = 1
    smooth_dirac_delta = dirac_delta.copy()
    smooth_dirac_delta += 1e-3
    smooth_dirac_delta[2] -= 5e-3
    assert_almost_equal(mx.contrib.quant._smooth_distribution(dirac_delta, eps=1e-3), smooth_dirac_delta)
Developer: kefeiw | Project: incubator-mxnet | Lines: 8 | Source: test_quantization.py


Example 12: test_bce_equal_ce2

def test_bce_equal_ce2():
    N = 100
    loss1 = gluon.loss.SigmoidBCELoss(from_sigmoid=True)
    loss2 = gluon.loss.SoftmaxCELoss(from_logits=True)
    out1 = mx.random.uniform(0.1, 0.9, shape=(N, 1))
    out2 = mx.nd.log(mx.nd.concat(1-out1, out1, dim=1) + 1e-8)
    label = mx.nd.round(mx.random.uniform(0, 1, shape=(N, 1)))
    assert_almost_equal(loss1(out1, label).asnumpy(), loss2(out2, label).asnumpy())
Developer: csgcmai | Project: mxnet | Lines: 8 | Source: test_loss.py


Example 13: pull_before_push

 def pull_before_push(kv):
     for i in range(nrepeat):
         for j in range(len(keys)):
             out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
             kv.pull(keys[j], out=out)
             exp = np.zeros_like(out[0].asnumpy())
             for o in out:
                 assert_almost_equal(o.asnumpy(), exp)
Developer: GrassSunFlower | Project: mxnet | Lines: 8 | Source: test_kvstore.py


Example 14: softmax_forward

 def softmax_forward(input_data, true_output):
     data = mx.sym.Variable('data')
     out1 = data.softmax(axis=1)
     exec1 = out1.bind(mx.cpu(), args={'data': input_data})
     exec1.forward()[0].wait_to_read()
     ndarr = exec1.outputs[0][0][0][0]
     nparr = ndarr.asnumpy()
     assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
Developer: dpom | Project: incubator-mxnet | Lines: 8 | Source: test_mkldnn.py


Example 15: test_logistic_loss_equal_bce

def test_logistic_loss_equal_bce():
    N = 100
    loss_binary = gluon.loss.LogisticLoss(label_format='binary')
    loss_signed = gluon.loss.LogisticLoss(label_format='signed')
    loss_bce = gluon.loss.SigmoidBCELoss(from_sigmoid=False)
    data = mx.random.uniform(-10, 10, shape=(N, 1))
    label = mx.nd.round(mx.random.uniform(0, 1, shape=(N, 1)))
    assert_almost_equal(loss_binary(data, label).asnumpy(), loss_bce(data, label).asnumpy())
    assert_almost_equal(loss_signed(data, 2 * label - 1).asnumpy(), loss_bce(data, label).asnumpy())
Developer: tuliang1996 | Project: incubator-mxnet | Lines: 9 | Source: test_loss.py


Example 16: test_normalize

def test_normalize():
    data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
    data_in = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
    out_nd = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in)
    data_expected = data_in.asnumpy()
    data_expected[:][:][0] = data_expected[:][:][0] / 3.0
    data_expected[:][:][1] = (data_expected[:][:][1] - 1.0) / 2.0
    data_expected[:][:][2] = data_expected[:][:][2] - 2.0
    assert_almost_equal(data_expected, out_nd.asnumpy())
Developer: bhuWenDongchao | Project: incubator-mxnet | Lines: 9 | Source: test_gluon_data_vision.py


Example 17: test_zero_grad

def test_zero_grad():
    data = mx.nd.random.uniform(shape=(3,3))
    net = nn.Embedding(3, 4, sparse_grad=True, prefix='test_zero_grad_')
    net.initialize()
    with mx.autograd.record():
        l = net(data)
        l.backward()
    net.collect_params().zero_grad()
    grad = net.collect_params()['test_zero_grad_weight'].grad()
    assert_almost_equal(grad.asnumpy(), grad.asnumpy() * 0)
Developer: zwz173131329 | Project: incubator-mxnet | Lines: 10 | Source: test_gluon.py


Example 18: bench_dot

    def bench_dot(lhs_shape, rhs_shape, lhs_stype, rhs_stype,
                  lhs_den, rhs_den, trans_lhs, ctx, num_repeat=10, fw="mxnet", distribution="uniform"):
        set_default_context(ctx)
        assert fw == "mxnet" or fw == "scipy"
        # Set funcs
        dot_func_sparse = mx.nd.sparse.dot if fw == "mxnet" else sp.spmatrix.dot
        dot_func_dense = mx.nd.dot if fw == "mxnet" else np.dot
        # Create matrix instances
        lhs_nd = rand_ndarray(lhs_shape, lhs_stype, density=lhs_den, distribution=distribution)
        # only uniform distribution supported for rhs
        if rhs_stype == 'csr':
            rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_den, distribution=distribution)
        else:
            rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_den, distribution="uniform")
        lhs_dns = None
        rhs_dns = None
        dense_cost = None
        sparse_cost = None

        if fw == "mxnet":
            lhs_dns = lhs_nd if lhs_stype == 'default' else lhs_nd.tostype('default')
            rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
            # One warm up run, verify correctness
            out = dot_func_sparse(lhs_nd, rhs_dns, trans_lhs)
            out_expected = dot_func_dense(lhs_dns, rhs_dns, trans_lhs)
            assert_almost_equal(out.asnumpy(), out_expected.asnumpy(), rtol=1e-1, atol=1e-1)
            sparse_cost = measure_cost(num_repeat, False, False, dot_func_sparse, lhs_nd, rhs_nd, trans_lhs)
            dense_cost = measure_cost(num_repeat, False, False, dot_func_dense, lhs_dns, rhs_dns, trans_lhs)
        else:
            lhs_dns = lhs_nd.asnumpy()
            rhs_dns = rhs_nd.asnumpy()
            lhs_nd = sp.csr_matrix(lhs_nd.asnumpy())
            rhs_nd = rhs_nd.asnumpy()
            # One warm up run, verify correctness
            lhs_nd_copy = sp.spmatrix.transpose(lhs_nd) if trans_lhs else lhs_nd
            out = dot_func_sparse(lhs_nd_copy, rhs_dns)
            sparse_cost = measure_cost(num_repeat, trans_lhs, False, dot_func_sparse, lhs_nd, rhs_nd)
            dense_cost = measure_cost(num_repeat, trans_lhs, True, dot_func_dense, lhs_dns, rhs_dns)

        speedup = dense_cost / sparse_cost
        # Print results
        m = lhs_shape[0]
        k = lhs_shape[1]
        n = rhs_shape[1]
        result_pattern = '{:15.1f} {:15.1f} {:>10} {:8d} {:8d} {:8d} {:13.2f} {:13.2f} {:8.2f}'
        results = result_pattern.format(lhs_den*100,
                                        rhs_den*100,
                                        str(ctx),
                                        m,
                                        k,
                                        n,
                                        sparse_cost*1000,
                                        dense_cost*1000,
                                        speedup)
        print(results)
Developer: CoderHHX | Project: incubator-mxnet | Lines: 55 | Source: dot.py


Example 19: test_global_norm_clip_multi_device

def test_global_norm_clip_multi_device():
    for check_isfinite in [True, False]:
        x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))
        x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))
        norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite)
        if check_isfinite:
            assert norm == 5.0
        else:
            assert norm.asscalar() == 5.0
        assert_almost_equal(x1.asnumpy(), np.ones((3, 3)) / 5)
        assert_almost_equal(x2.asnumpy(), np.ones((4, 4)) / 5)
Developer: dpom | Project: incubator-mxnet | Lines: 11 | Source: test_gluon_gpu.py


Example 20: test_mkldnn_ndarray_slice

def test_mkldnn_ndarray_slice():
    ctx = mx.cpu()
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    net.collect_params().initialize(ctx=ctx)
    x = mx.nd.array(np.ones([32, 3, 224, 224]), ctx)
    y = net(x)

    # trigger computation on ndarray slice
    assert_almost_equal(y[0].asnumpy()[0, 0, 0], 0.3376348)
Developer: dpom | Project: incubator-mxnet | Lines: 11 | Source: test_mkldnn.py



Note: The mxnet.test_utils.assert_almost_equal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not repost without permission.

