
Python torch.randn Function Code Examples


This article collects typical usage examples of the torch.randn function in Python. If you are wondering how exactly the randn function is used, how to call it, or what working examples look like, the curated code examples below should help.



The following presents 20 code examples of the randn function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
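As a quick orientation before the examples, here is a minimal sketch of the basic torch.randn call; the shapes, dtype, and device used below are illustrative assumptions, and the returned values are random draws from a standard normal distribution:

import torch

# torch.randn fills a tensor of the given shape with samples from N(0, 1)
x = torch.randn(2, 3)                      # 2x3 tensor of standard-normal values
y = torch.randn(4, dtype=torch.float64)    # the dtype can be set explicitly
if torch.cuda.is_available():
    z = torch.randn(2, 3, device="cuda")   # the tensor can be created directly on the GPU
print(x.shape, x.mean().item(), x.std().item())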

Example 1: main

def main():
    dtype = torch.cuda.FloatTensor

    batch_size = 64
    input_dimension = 1000
    hidden_dimension = 100
    output_dimension = 10

    random_input = torch.randn(batch_size, input_dimension).type(dtype)
    true_output = torch.randn(batch_size, output_dimension).type(dtype)

    weights_1 = torch.randn(input_dimension, hidden_dimension).type(dtype)
    weights_2 = torch.randn(hidden_dimension, output_dimension).type(dtype)

    learning_rate = 1e-6
    for t in range(500):
        h = random_input.mm(weights_1)
        # Compute ReLU
        h_relu = h.clamp(min=0)
        predicted_output = h_relu.mm(weights_2)

        loss = (predicted_output - true_output).pow(2).sum()
        print("Loss: {}".format(loss))

        # Compute gradient
        grad_y_pred = 2.0 * (predicted_output - true_output)
        grad_w2 = h_relu.t().mm(grad_y_pred)
        grad_h_relu = grad_y_pred.mm(weights_2.t())
        grad_h = grad_h_relu.clone()
        grad_h[h < 0] = 0
        grad_w1 = random_input.t().mm(grad_h)

        weights_1 -= learning_rate * grad_w1
        weights_2 -= learning_rate * grad_w2
Developer: vluzko, Project: test, Lines: 34, Source: tutorial.py


Example 2: fit

    def fit(self):
        args = self.args

        for epoch in range(args.max_epochs):
            self.G.train()
            self.D.train()
            for step, inputs in enumerate(self.train_loader):
                batch_size = inputs[0].size(0)

                images = inputs[0].to(self.device)
                labels = inputs[1].to(self.device)
                
                # create the labels used to distinguish real from fake
                real_labels = torch.ones(batch_size, dtype=torch.int64).to(self.device)
                fake_labels = torch.zeros(batch_size, dtype=torch.int64).to(self.device)
                
                # train the discriminator
                
                # discriminator <- real image
                D_real, D_real_cls = self.D(images)
                D_loss_real = self.loss_fn(D_real, real_labels)
                D_loss_real_cls = self.loss_fn(D_real_cls, labels)
                
                # noise vector
                z = torch.randn(batch_size, args.z_dim).to(self.device)

                # make label to onehot vector
                y_onehot = torch.zeros((batch_size, 10)).to(self.device)
                y_onehot.scatter_(1, labels.unsqueeze(1), 1)
                y_onehot.requires_grad_(False)
                
                # discriminator <- fake image
                G_fake = self.G(y_onehot, z)
                D_fake, D_fake_cls = self.D(G_fake)
                D_loss_fake = self.loss_fn(D_fake, fake_labels)
                D_loss_fake_cls = self.loss_fn(D_fake_cls, labels)
                
                D_loss = D_loss_real + D_loss_fake + \
                         D_loss_real_cls + D_loss_fake_cls
                self.D.zero_grad()
                D_loss.backward()
                self.optim_D.step()
                
                # train the generator

                z = torch.randn(batch_size, args.z_dim).to(self.device)
                G_fake = self.G(y_onehot, z)
                D_fake, D_fake_cls = self.D(G_fake)
                
                G_loss = self.loss_fn(D_fake, real_labels) + \
                         self.loss_fn(D_fake_cls, labels)
                self.G.zero_grad()
                G_loss.backward()
                self.optim_G.step()

            if (epoch+1) % args.print_every == 0:
                print("Epoch [{}/{}] Loss_D: {:.3f}, Loss_G: {:.3f}".
                    format(epoch+1, args.max_epochs, D_loss.item(), G_loss.item()))
                self.save(args.ckpt_dir, epoch+1)
                self.sample(epoch+1)
Developer: muncok, Project: pytorch-exercise, Lines: 60, Source: solver.py


Example 3: __init__

    def __init__(self,
                 input_dim: int,
                 forward_combination: str = "y-x",
                 backward_combination: str = "y-x",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_sentinels: bool = True) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._forward_combination = forward_combination
        self._backward_combination = backward_combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        if self._input_dim % 2 != 0:
            raise ConfigurationError("The input dimension is not divisible by 2, but the "
                                     "BidirectionalEndpointSpanExtractor assumes the embedded representation "
                                     "is bidirectional (and hence divisible by 2).")
        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

        self._use_sentinels = use_sentinels
        if use_sentinels:
            self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
            self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 31, Source: bidirectional_endpoint_span_extractor.py


Example 4: __init__

    def __init__(self):
        super(Chunking, self).__init__()
        
        self.input_size = embedding_size \
                        + nb_postags \
                        + postag_hn_size * 2

        self.w = nn.Parameter(torch.randn(chunking_nb_layers * 2, 
                                          max_sentence_size, 
                                          chunking_hn_size))
        self.h = nn.Parameter(torch.randn(chunking_nb_layers * 2, 
                                          max_sentence_size,
                                          chunking_hn_size))

        self.embedding = nn.Embedding(nb_postags, chunking_postag_emb_size)
        
        self.aux_emb = torch.arange(0, nb_postags)
        self.aux_emb = Variable(self.aux_emb).long()
        
        self.bi_lstm = nn.LSTM(self.input_size, 
                               chunking_hn_size,
                               chunking_nb_layers,
                               bidirectional=True)
        
        self.fc = nn.Linear(chunking_hn_size * 2, nb_chunktags)
Developer: bage79, Project: multitask_sentiment_analysis, Lines: 25, Source: chunking.py


Example 5: test_forget_mult_cuda

def test_forget_mult_cuda():
    this_tests(ForgetMultGPU, BwdForgetMultGPU)
    x,f = torch.randn(5,3,20).cuda().chunk(2, dim=2)
    x,f = x.contiguous().requires_grad_(True),f.contiguous().requires_grad_(True)
    th_x,th_f = detach_and_clone(x),detach_and_clone(f)
    for (bf, bw) in [(True,True), (False,True), (True,False), (False,False)]:
        forget_mult = BwdForgetMultGPU if bw else ForgetMultGPU
        th_out = forget_mult_CPU(th_x, th_f, hidden_init=None, batch_first=bf, backward=bw)
        th_loss = th_out.pow(2).mean()
        th_loss.backward()
        out = forget_mult.apply(x, f, None, bf)
        loss = out.pow(2).mean()
        loss.backward()
        assert torch.allclose(th_out,out, rtol=1e-4, atol=1e-5)
        assert torch.allclose(th_x.grad,x.grad, rtol=1e-4, atol=1e-5)
        assert torch.allclose(th_f.grad,f.grad, rtol=1e-4, atol=1e-5)
        for p in [x,f, th_x, th_f]:
            p = p.detach()
            p.grad = None
        h = torch.randn((5 if bf else 3), 10).cuda().requires_grad_(True)
        th_h = detach_and_clone(h)
        th_out = forget_mult_CPU(th_x, th_f, hidden_init=th_h, batch_first=bf, backward=bw)
        th_loss = th_out.pow(2).mean()
        th_loss.backward()
        out = forget_mult.apply(x.contiguous(), f.contiguous(), h, bf)
        loss = out.pow(2).mean()
        loss.backward()
        assert torch.allclose(th_out,out, rtol=1e-4, atol=1e-5)
        assert torch.allclose(th_x.grad,x.grad, rtol=1e-4, atol=1e-5)
        assert torch.allclose(th_f.grad,f.grad, rtol=1e-4, atol=1e-5)
        assert torch.allclose(th_h.grad,h.grad, rtol=1e-4, atol=1e-5)
        for p in [x,f, th_x, th_f]:
            p = p.detach()
            p.grad = None
Developer: SiddharthTiwari, Project: fastai, Lines: 34, Source: test_text_qrnn.py


Example 6: test_backward_computes_backward_pass

def test_backward_computes_backward_pass():
    weight = torch.randn(4, 8, 3, 3).cuda()
    input = torch.randn(4, 8, 4, 4).cuda()

    input_var = Variable(input, requires_grad=True)
    weight_var = Parameter(weight)
    out_var = F.conv2d(
        input=input_var,
        weight=weight_var,
        bias=None,
        stride=1,
        padding=1,
        dilation=1,
        groups=1,
    )
    out_var.backward(gradient=input_var.data.clone().fill_(1))
    out = out_var.data
    input_grad = input_var.grad.data
    weight_grad = weight_var.grad.data

    func = _EfficientConv2d(
        stride=1,
        padding=1,
        dilation=1,
        groups=1,
    )
    out_efficient = func.forward(weight, None, input)
    weight_grad_efficient, _, input_grad_efficient = func.backward(
            weight, None, input, input.clone().fill_(1))

    assert(almost_equal(out, out_efficient))
    assert(almost_equal(input_grad, input_grad_efficient))
    assert(almost_equal(weight_grad, weight_grad_efficient))
Developer: Robert0812, Project: efficient_densenet_pytorch, Lines: 33, Source: efficient_conv_test.py


Example 7: test_degenerate_GPyTorchPosterior

    def test_degenerate_GPyTorchPosterior(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # singular covariance matrix
            degenerate_covar = torch.tensor(
                [[1, 1, 0], [1, 1, 0], [0, 0, 2]], dtype=dtype, device=device
            )
            mean = torch.rand(3, dtype=dtype, device=device)
            mvn = MultivariateNormal(mean, lazify(degenerate_covar))
            posterior = GPyTorchPosterior(mvn=mvn)
            # basics
            self.assertEqual(posterior.device.type, device.type)
            self.assertTrue(posterior.dtype == dtype)
            self.assertEqual(posterior.event_shape, torch.Size([3, 1]))
            self.assertTrue(torch.equal(posterior.mean, mean.unsqueeze(-1)))
            variance_exp = degenerate_covar.diag().unsqueeze(-1)
            self.assertTrue(torch.equal(posterior.variance, variance_exp))

            # rsample
            with warnings.catch_warnings(record=True) as w:
                # we check that the p.d. warning is emitted - this only
                # happens once per posterior, so we need to check only once
                samples = posterior.rsample(sample_shape=torch.Size([4]))
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
                self.assertTrue("not p.d." in str(w[-1].message))
            self.assertEqual(samples.shape, torch.Size([4, 3, 1]))
            samples2 = posterior.rsample(sample_shape=torch.Size([4, 2]))
            self.assertEqual(samples2.shape, torch.Size([4, 2, 3, 1]))
            # rsample w/ base samples
            base_samples = torch.randn(4, 3, 1, device=device, dtype=dtype)
            samples_b1 = posterior.rsample(
                sample_shape=torch.Size([4]), base_samples=base_samples
            )
            samples_b2 = posterior.rsample(
                sample_shape=torch.Size([4]), base_samples=base_samples
            )
            self.assertTrue(torch.allclose(samples_b1, samples_b2))
            base_samples2 = torch.randn(4, 2, 3, 1, device=device, dtype=dtype)
            samples2_b1 = posterior.rsample(
                sample_shape=torch.Size([4, 2]), base_samples=base_samples2
            )
            samples2_b2 = posterior.rsample(
                sample_shape=torch.Size([4, 2]), base_samples=base_samples2
            )
            self.assertTrue(torch.allclose(samples2_b1, samples2_b2))
            # collapse_batch_dims
            b_mean = torch.rand(2, 3, dtype=dtype, device=device)
            b_degenerate_covar = degenerate_covar.expand(2, *degenerate_covar.shape)
            b_mvn = MultivariateNormal(b_mean, lazify(b_degenerate_covar))
            b_posterior = GPyTorchPosterior(mvn=b_mvn)
            b_base_samples = torch.randn(4, 2, 3, 1, device=device, dtype=dtype)
            with warnings.catch_warnings(record=True) as w:
                b_samples = b_posterior.rsample(
                    sample_shape=torch.Size([4]), base_samples=b_base_samples
                )
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
                self.assertTrue("not p.d." in str(w[-1].message))
            self.assertEqual(b_samples.shape, torch.Size([4, 2, 3, 1]))
Developer: saschwan, Project: botorch, Lines: 60, Source: test_gpytorch.py


Example 8: test_DepthConcat

    def test_DepthConcat(self):
        outputSize = torch.IntTensor((5, 6, 7, 8))
        input = torch.randn(2, 3, 12, 12)
        gradOutput = torch.randn(2, int(outputSize.sum()), 12, 12)
        concat = nn.DepthConcat(1)
        concat.add(nn.SpatialConvolution(3, outputSize[0], 1, 1, 1, 1))  # > 2, 5, 12, 12
        concat.add(nn.SpatialConvolution(3, outputSize[1], 3, 3, 1, 1))  # > 2, 6, 10, 10
        concat.add(nn.SpatialConvolution(3, outputSize[2], 4, 4, 1, 1))  # > 2, 7, 9, 9
        concat.add(nn.SpatialConvolution(3, outputSize[3], 5, 5, 1, 1))  # > 2, 8, 8, 8
        concat.zeroGradParameters()
        # forward/backward
        outputConcat = concat.forward(input)
        gradInputConcat = concat.backward(input, gradOutput)
        # the spatial dims are the largest, the nFilters is the sum
        output = torch.Tensor(2, int(outputSize.sum()), 12, 12).zero_()  # zero for padding
        narrows = ((slice(None), slice(0, 5), slice(None), slice(None)),
                   (slice(None), slice(5, 11), slice(1, 11), slice(1, 11)),
                   (slice(None), slice(11, 18), slice(1, 10), slice(1, 10)),
                   (slice(None), slice(18, 26), slice(2, 10), slice(2, 10)))
        gradInput = input.clone().zero_()
        for i in range(4):
            conv = concat.get(i)
            gradWeight = conv.gradWeight.clone()
            conv.zeroGradParameters()
            output[narrows[i]].copy_(conv.forward(input))
            gradInput.add_(conv.backward(input, gradOutput[narrows[i]]))
            self.assertEqual(gradWeight, conv.gradWeight)

        self.assertEqual(output, outputConcat)
        self.assertEqual(gradInput, gradInputConcat)

        # Check that these don't raise errors
        concat.__repr__()
        str(concat)
Developer: bhuWenDongchao, Project: pytorch, Lines: 34, Source: test_legacy_nn.py


Example 9: bisect_demo

def bisect_demo():
    """ Bisect the LB/UB on specified columns.
        The key is to use scatter_() to convert indices into one-hot encodings.
    """
    t1t2 = torch.stack((torch.randn(5, 4), torch.randn(5, 4)), dim=-1)
    lb, _ = torch.min(t1t2, dim=-1)
    ub, _ = torch.max(t1t2, dim=-1)
    print('LB:', lb)
    print('UB:', ub)

    # random idxs for testing
    idxs = torch.randn_like(lb)
    _, idxs = idxs.max(dim=-1)  # <Batch>
    print('Split idxs:', idxs)

    idxs = idxs.unsqueeze(dim=-1)  # Batch x 1
    idxs = torch.zeros_like(lb).byte().scatter_(-1, idxs, 1)  # convert into one-hot encoding
    print('Reorg idxs:', idxs)

    mid = (lb + ub) / 2.0
    lefts_lb = lb
    lefts_ub = torch.where(idxs, mid, ub)  # use the one-hot encoding to call torch.where()
    rights_lb = torch.where(idxs, mid, lb)  # definitely faster than element-wise reassignment
    rights_ub = ub

    print('LEFT LB:', lefts_lb)
    print('LEFT UB:', lefts_ub)
    print('RIGHT LB:', rights_lb)
    print('RIGHT UB:', rights_ub)

    newlb = torch.cat((lefts_lb, rights_lb), dim=0)
    newub = torch.cat((lefts_ub, rights_ub), dim=0)
    return newlb, newub
Developer: AndriyLin, Project: Utils, Lines: 33, Source: pytorch.py


Example 10: test_backward

    def test_backward(self):
        a = Variable(torch.randn(2, 2), requires_grad=True)
        b = Variable(torch.randn(2, 2), requires_grad=True)

        x = a
        y = a * b

        trace, inputs = torch._C._tracer_enter((x, y), 2)

        def fn(x, y):
            return y * 2 * x
        z = fn(*inputs)
        torch._C._tracer_exit((z,))
        torch._C._jit_pass_lint(trace)

        # Run first backward
        grad, = torch.autograd.grad(z, x, Variable(torch.ones(2, 2), requires_grad=True), create_graph=True)
        torch._C._jit_pass_lint(trace)

        # Run second backward
        grad.sum().backward(create_graph=True)
        torch._C._jit_pass_lint(trace)

        # Run dead code elimination to remove unused trace nodes
        torch._C._jit_pass_dce(trace)
        # This is nondeterministic, see:
        #   https://github.com/ezyang/pytorch/issues/227
        # self.assertExpectedTrace(trace)
        self.skipTest("output is nondeterministic on Travis/Python 3.5")
Developer: bhuWenDongchao, Project: pytorch, Lines: 29, Source: test_jit.py


Example 11: main

def main():
    dtype = torch.FloatTensor
    N, d_in, H, d_out = 64, 1000, 100, 10  # d_in is the input dimension, d_out the output dimension, H the hidden-layer size

    x = Variable(torch.randn(N, d_in).type(dtype), requires_grad=False)
    y = Variable(torch.randn(N, d_out).type(dtype), requires_grad=False)

    model = torch.nn.Sequential(
        torch.nn.Linear(d_in,H),
        torch.nn.ReLU(),
        torch.nn.Linear(H,d_out)
    )

    loss_fn = torch.nn.MSELoss(size_average=False)

    learning_rate = 1e-4
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    for t in range(500):
        y_pred = model(x)

        loss = loss_fn(y_pred,y)

        model.zero_grad()
        loss.backward()

        optimizer.step()
    print(loss.data[0])
Developer: chenjianhong, Project: machineleaning, Lines: 27, Source: nn_demo.py


Example 12: test_non_stateful_states_are_sorted_correctly

    def test_non_stateful_states_are_sorted_correctly(self):
        encoder_base = _EncoderBase(stateful=False)
        initial_states = (torch.randn(6, 5, 7), torch.randn(6, 5, 7))
        # Check that we sort the state for non-stateful encoders. To test
        # we'll just use a "pass through" encoder, as we aren't actually testing
        # the functionality of the encoder here anyway.
        _, states, restoration_indices = encoder_base.sort_and_run_forward(lambda *x: x,
                                                                           self.tensor,
                                                                           self.mask,
                                                                           initial_states)
        # Our input tensor had 2 zero length sequences, so we need
        # to concat a tensor of shape
        # (num_layers * num_directions, batch_size - num_valid, hidden_dim),
        # to the output before unsorting it.
        zeros = torch.zeros([6, 2, 7])

        # sort_and_run_forward strips fully-padded instances from the batch;
        # in order to use the restoration_indices we need to add back the two
        #  that got stripped. What we get back should match what we started with.
        for state, original in zip(states, initial_states):
            assert list(state.size()) == [6, 3, 7]
            state_with_zeros = torch.cat([state, zeros], 1)
            unsorted_state = state_with_zeros.index_select(1, restoration_indices)
            for index in [0, 1, 3]:
                numpy.testing.assert_array_equal(unsorted_state[:, index, :].data.numpy(),
                                                 original[:, index, :].data.numpy())
Developer: apmoore1, Project: allennlp, Lines: 26, Source: encoder_base_test.py


Example 13: test_inline_jit_compile_extension_multiple_sources_and_no_functions

    def test_inline_jit_compile_extension_multiple_sources_and_no_functions(self):
        cpp_source1 = '''
        at::Tensor sin_add(at::Tensor x, at::Tensor y) {
          return x.sin() + y.sin();
        }
        '''

        cpp_source2 = '''
        #include <torch/torch.h>
        at::Tensor sin_add(at::Tensor x, at::Tensor y);
        PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
          m.def("sin_add", &sin_add, "sin(x) + sin(y)");
        }
        '''

        module = torch.utils.cpp_extension.load_inline(
            name='inline_jit_extension',
            cpp_sources=[cpp_source1, cpp_source2],
            verbose=True)

        x = torch.randn(4, 4)
        y = torch.randn(4, 4)

        z = module.sin_add(x, y)
        self.assertEqual(z, x.sin() + y.sin())
Developer: xiongyw, Project: pytorch, Lines: 25, Source: test_cpp_extensions.py


Example 14: get_batch

def get_batch(batch_size=32, randomize=False):
    """Builds a batch i.e. (x, f(x)) pair."""
    random = torch.randn(batch_size)
    x = make_features(random)
    y = f(x)
    if randomize: y += torch.randn(1)
    return Variable(x), Variable(y)
Developer: deo1, Project: deo1, Lines: 7, Source: regression.py


Example 15: test_forward

    def test_forward(self):
        batch = 16
        len1, len2 = 21, 24
        seq_len1 = torch.randint(low=len1 - 10, high=len1 + 1, size=(batch,)).long()
        seq_len2 = torch.randint(low=len2 - 10, high=len2 + 1, size=(batch,)).long()

        mask1 = []
        for w in seq_len1:
            mask1.append([1] * w.item() + [0] * (len1 - w.item()))
        mask1 = torch.FloatTensor(mask1)
        mask2 = []
        for w in seq_len2:
            mask2.append([1] * w.item() + [0] * (len2 - w.item()))
        mask2 = torch.FloatTensor(mask2)

        d = 200  # hidden dimension
        l = 20  # number of perspectives
        test1 = torch.randn(batch, len1, d)
        test2 = torch.randn(batch, len2, d)
        test1 = test1 * mask1.view(-1, len1, 1).expand(-1, len1, d)
        test2 = test2 * mask2.view(-1, len2, 1).expand(-1, len2, d)

        test1_fw, test1_bw = torch.split(test1, d // 2, dim=-1)
        test2_fw, test2_bw = torch.split(test2, d // 2, dim=-1)

        ml_fw = BiMpmMatching.from_params(Params({"is_forward": True, "num_perspectives": l}))
        ml_bw = BiMpmMatching.from_params(Params({"is_forward": False, "num_perspectives": l}))

        vecs_p_fw, vecs_h_fw = ml_fw(test1_fw, mask1, test2_fw, mask2)
        vecs_p_bw, vecs_h_bw = ml_bw(test1_bw, mask1, test2_bw, mask2)
        vecs_p, vecs_h = torch.cat(vecs_p_fw + vecs_p_bw, dim=2), torch.cat(vecs_h_fw + vecs_h_bw, dim=2)

        assert vecs_p.size() == torch.Size([batch, len1, 10 + 10 * l])
        assert vecs_h.size() == torch.Size([batch, len2, 10 + 10 * l])
        assert ml_fw.get_output_dim() == ml_bw.get_output_dim() == vecs_p.size(2) // 2 == vecs_h.size(2) // 2
Developer: apmoore1, Project: allennlp, Lines: 35, Source: bimpm_matching_test.py


Example 16: test_convtbc

    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
Developer: fyabc, Project: fairseq, Lines: 27, Source: test_convtbc.py


Example 17: test_stack_overwrite_failure

    def test_stack_overwrite_failure(self):
        data1 = {"latent2": torch.randn(2)}
        data2 = {"latent2": torch.randn(2)}
        cm = poutine.condition(poutine.condition(self.model, data=data1),
                               data=data2)
        with pytest.raises(AssertionError):
            cm()
Developer: lewisKit, Project: pyro, Lines: 7, Source: test_poutines.py


Example 18: main

def main():
    dtype = torch.FloatTensor
    N, d_in, H, d_out = 64, 1000, 100, 10  # d_in is the input dimension, d_out the output dimension, H the hidden-layer size

    x = torch.randn(N,d_in).type(dtype)
    y = torch.randn(N,d_out).type(dtype)

    w1 = torch.randn(d_in,H).type(dtype)
    w2 = torch.randn(H, d_out).type(dtype)

    learning_rate = 1e-6
    for t in range(500):

        # Define the model (forward pass)
        h = x.mm(w1)
        h_relu = h.clamp(min=0)
        y_pred = h_relu.mm(w2)

        # Compute the loss
        loss = (y_pred - y).pow(2).sum()

        grad_y_pred = 2.0 * (y_pred - y)
        grad_w2 = h_relu.t().mm(grad_y_pred)
        grad_h_relu = grad_y_pred.mm(w2.t())
        grad_h = grad_h_relu.clone()
        grad_h[h<0] = 0
        grad_w1 = x.t().mm(grad_h)

        w1 -= learning_rate * grad_w1
        w2 -= learning_rate * grad_w2

    print(type(loss))
    print(loss)
Developer: chenjianhong, Project: machineleaning, Lines: 33, Source: tensor_demo.py


Example 19: test_trace_expire

    def test_trace_expire(self):
        x = Variable(torch.randn(2, 2), requires_grad=True)
        y = Variable(torch.randn(2, 2), requires_grad=True)

        def record_trace(num_backwards):
            trace = torch._C._tracer_enter((x, y), num_backwards)
            z = y * 2 * x
            torch._C._tracer_exit((z,))
            return z, trace

        def check(expired, complete):
            self.assertEqual(trace.is_expired, expired)
            self.assertEqual(trace.is_complete, complete)

        z, trace = record_trace(0)
        check(False, True)
        del z
        check(False, True)

        z, trace = record_trace(1)
        check(False, False)
        del z
        check(True, False)

        z, trace = record_trace(1)
        check(False, False)
        z.sum().backward()
        check(False, True)
        del z
        check(False, True)
Developer: Northrend, Project: pytorch, Lines: 30, Source: test_jit.py


Example 20: test_getitem_1d

    def test_getitem_1d(self):
        t = torch.randn(15)
        l = torch.randn(15)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])
Developer: RichieMay, Project: pytorch, Lines: 7, Source: test_dataloader.py



Note: The torch.randn examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce without permission.

