
Python torch.abs Function Code Examples


This article collects typical usage examples of the Python torch.abs function. If you have been asking yourself how torch.abs works, how to call it, or what real-world uses of it look like, the curated examples below should help.



Twenty code examples of the abs function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code samples.
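Before diving into the project excerpts, a quick orientation: torch.abs returns the elementwise absolute value of its input tensor (the method form x.abs() is equivalent, and for complex tensors it returns the magnitude). A minimal, self-contained demonstration:

import torch

x = torch.tensor([-1.5, 0.0, 2.0])
print(torch.abs(x))   # tensor([1.5000, 0.0000, 2.0000])
print(x.abs())        # method form, same result
print(torch.abs(torch.tensor([3 - 4j])))  # complex input -> magnitude: tensor([5.])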

Example 1: kurtosis_score

def kurtosis_score(x, dim=0):
    '''Test whether a dataset has normal kurtosis.

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
    Adapted from `scipy.stats.kurtosistest`.

    Args:
        x: Tensor of the sample data.
        dim: Dimension along which to compute the test. Default is 0.
            If None, compute over the whole tensor `x`.
    Returns:
        statistic: The computed z-score for this test.
        p-value: The two-sided p-value for the hypothesis test.
    '''
    x, n, dim = _x_n_dim(x, dim)
    if n < 20:
        raise ValueError(
            "Number of elements has to be >= 20 to compute kurtosis")
    b2 = (x**4).mean(dim) / (x**2).mean(dim)**2
    E = 3.0 * (n - 1) / (n + 1)
    varb2 = 24.0 * n * (n - 2) * (n - 3) / ((n + 1)**2 * (n + 3) * (n + 5))
    x = (b2 - E) / math.sqrt(varb2)
    sqrtbeta1 = 6.0 * (n * n - 5 * n + 2) / ((n + 7) * (n + 9)) *\
        math.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))
    A = 6.0 + 8.0 / sqrtbeta1 * \
        (2.0 / sqrtbeta1 + math.sqrt(1 + 4.0 / (sqrtbeta1**2)))
    term1 = 1 - 2 / (9.0 * A)
    denom = 1 + x * math.sqrt(2 / (A - 4.0))
    term2 = torch.sign(denom) * torch.pow((1 - 2.0 / A) /
                                          torch.abs(denom), 1 / 3.0)
    Z = (term1 - term2) / math.sqrt(2 / (9.0 * A))
    return Z, 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))
Author: ModarTensai, Project: network_moments, Lines: 34, Source: stats.py
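The excerpt above calls a private helper _x_n_dim that is not shown. Since the moment ratio b2 = E[x^4]/E[x^2]^2 only equals the kurtosis for centered data, the helper plausibly centers the sample and returns its size along dim. A hypothetical reconstruction (a sketch, not the project's actual code):

import torch

def _x_n_dim(x, dim):
    # Hypothetical helper: flatten when dim is None, center the sample
    # along dim, and return it together with the sample size.
    if dim is None:
        x = x.reshape(-1)
        dim = 0
    n = x.shape[dim]
    return x - x.mean(dim, keepdim=True), n, dim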


Example 2: forward

    def forward(self, agent_qs, states):
        """Forward pass for the mixer.

        Arguments:
            agent_qs: Tensor of shape [B, T, n_agents] (per-agent Q-values)
            states: Tensor of shape [B, T, state_dim]
        """
        bs = agent_qs.size(0)
        states = states.reshape(-1, self.state_dim)
        agent_qs = agent_qs.view(-1, 1, self.n_agents)
        # First layer: abs() keeps the hypernetwork-generated mixing weights
        # nonnegative, enforcing monotonicity of q_tot in each agent's Q-value
        w1 = th.abs(self.hyper_w_1(states))
        b1 = self.hyper_b_1(states)
        w1 = w1.view(-1, self.n_agents, self.embed_dim)
        b1 = b1.view(-1, 1, self.embed_dim)
        hidden = F.elu(th.bmm(agent_qs, w1) + b1)
        # Second layer
        w_final = th.abs(self.hyper_w_final(states))
        w_final = w_final.view(-1, self.embed_dim, 1)
        # State-dependent bias
        v = self.V(states).view(-1, 1, 1)
        # Compute final output
        y = th.bmm(hidden, w_final) + v
        # Reshape and return
        q_tot = y.view(bs, -1, 1)
        return q_tot
Author: jamescasbon, Project: ray, Lines: 26, Source: mixers.py
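Why the th.abs calls on the hypernetwork outputs? QMIX requires q_tot to be monotonically nondecreasing in each agent's Q-value, and nonnegative mixing weights combined with monotone activations (ELU here) guarantee exactly that. A standalone numeric illustration of the property (not part of the module above):

import torch
import torch.nn.functional as F

w = torch.abs(torch.randn(3, 4))               # nonnegative mixing weights
q_lo = torch.tensor([[0.1, 0.2, 0.3]])
q_hi = q_lo + torch.tensor([[0.0, 0.5, 0.0]])  # raise one agent's Q-value
h_lo = F.elu(q_lo @ w)
h_hi = F.elu(q_hi @ w)                         # ELU is monotone, so the ordering survives
assert torch.all(h_hi >= h_lo)                 # the mixed output never decreases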


Example 3: __call__

    def __call__(self, module, features):
        statistics = self.get_statistics(features)

        self.statistics = statistics
        if self.mode == 'store':
            self.stored[module] = statistics.detach()

        elif self.mode == 'match':
           
            if statistics.ndimension() == 2:

                if self.method == 'maximize':
                    self.losses[module] = - statistics[0, self.map_index]
                else:
                    # drive the selected statistic toward the fixed target value 300
                    self.losses[module] = torch.abs(300 - statistics[0, self.map_index])

            else:
                ws = self.window_size

                t = statistics.detach() * 0  # zero tensor with the same shape as statistics

                # center crops (ws on each side of the midpoint) of the statistics map and the template
                s_cc = statistics[:1, :, t.shape[2] // 2 - ws:t.shape[2] // 2 + ws, t.shape[3] // 2 - ws:t.shape[3] // 2 + ws]
                t_cc = t[:1, :, t.shape[2] // 2 - ws:t.shape[2] // 2 + ws, t.shape[3] // 2 - ws:t.shape[3] // 2 + ws]
                t_cc[:, self.map_index, ...] = 1  # mark only the target feature map

                if self.method == 'maximize':
                    self.losses[module] = -(s_cc * t_cc.contiguous()).sum()
                else:
                    self.losses[module] = torch.abs(200 -(s_cc * t_cc.contiguous())).sum()
Author: 1exx, Project: deep-image-prior, Lines: 29, Source: matcher.py


Example 4: test_MultivariateNormalQMCEngineDegenerate

    def test_MultivariateNormalQMCEngineDegenerate(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
            mean = torch.zeros(3, device=device, dtype=dtype)
            cov = torch.tensor(
                [[1, 0, 1], [0, 1, 1], [1, 1, 2]], device=device, dtype=dtype
            )
            engine = MultivariateNormalQMCEngine(mean=mean, cov=cov, seed=12345)
            samples = engine.draw(n=2000)
            self.assertEqual(samples.dtype, dtype)
            self.assertEqual(samples.device.type, device.type)
            self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
            self.assertTrue(torch.abs(torch.std(samples[:, 0]) - 1) < 1e-2)
            self.assertTrue(torch.abs(torch.std(samples[:, 1]) - 1) < 1e-2)
            self.assertTrue(torch.abs(torch.std(samples[:, 2]) - math.sqrt(2)) < 1e-2)
            for i in (0, 1, 2):
                _, pval = shapiro(samples[:, i].cpu().numpy())
                self.assertGreater(pval, 0.9)
            cov = np.cov(samples.cpu().numpy().transpose())
            self.assertLess(np.abs(cov[0, 1]), 1e-2)
            self.assertLess(np.abs(cov[0, 2] - 1), 1e-2)
            # check to see if X + Y = Z almost exactly
            self.assertTrue(
                torch.all(
                    torch.abs(samples[:, 0] + samples[:, 1] - samples[:, 2]) < 1e-5
                )
            )
Author: saschwan, Project: botorch, Lines: 28, Source: test_normal.py


Example 5: simplax

def simplax(surrogate, x, logits, mixtureweights, k=1):

    B = logits.shape[0]
    probs = torch.softmax(logits, dim=1)

    cat = RelaxedOneHotCategorical(probs=probs, temperature=torch.tensor([1.]).cuda())

    outputs = {}
    net_loss = 0
    surr_loss = 0
    for jj in range(k):

        cluster_S = cat.rsample()
        cluster_H = H(cluster_S)

        logq = cat.log_prob(cluster_S.detach()).view(B,1)
        logpx_given_z = logprob_undercomponent(x, component=cluster_H)
        logpz = torch.log(mixtureweights[cluster_H]).view(B,1)
        logpxz = logpx_given_z + logpz #[B,1]
        f = logpxz - logq - 1.

        surr_input = torch.cat([cluster_S, x, logits], dim=1) #[B,21]
        surr_pred = surrogate.net(surr_input)

        net_loss += - torch.mean((f.detach() - surr_pred.detach()) * logq  + surr_pred)


        # surr_loss += torch.mean(torch.abs(f.detach()-1.-surr_pred))
        # grad_logq =  torch.mean( torch.autograd.grad([torch.mean(logq)], [logits], create_graph=True, retain_graph=True)[0], dim=1, keepdim=True)
        # grad_surr = torch.mean( torch.autograd.grad([torch.mean(surr_pred)], [logits], create_graph=True, retain_graph=True)[0], dim=1, keepdim=True)

        grad_logq =  torch.autograd.grad([torch.mean(logq)], [logits], create_graph=True, retain_graph=True)[0]
        grad_surr =  torch.autograd.grad([torch.mean(surr_pred)], [logits], create_graph=True, retain_graph=True)[0]
        surr_loss = torch.mean(((f.detach() - surr_pred) * grad_logq + grad_surr)**2)

        surr_dif = torch.mean(torch.abs(f.detach() - surr_pred))
        # surr_loss = torch.mean(torch.abs(f.detach() - surr_pred))

        grad_path = torch.autograd.grad([torch.mean(surr_pred)], [logits], create_graph=True, retain_graph=True)[0]
        grad_score = torch.autograd.grad([torch.mean((f.detach() - surr_pred.detach()) * logq)], [logits], create_graph=True, retain_graph=True)[0]
        grad_path = torch.mean(torch.abs(grad_path))
        grad_score = torch.mean(torch.abs(grad_score))
   
    net_loss = net_loss / k
    surr_loss = surr_loss / k

    outputs['net_loss'] = net_loss
    outputs['f'] = f
    outputs['logpx_given_z'] = logpx_given_z
    outputs['logpz'] = logpz
    outputs['logq'] = logq
    outputs['surr_loss'] = surr_loss
    outputs['surr_dif'] = surr_dif   
    outputs['grad_path'] = grad_path   
    outputs['grad_score'] = grad_score   

    return outputs #net_loss, f, logpx_given_z, logpz, logq, surr_loss, surr_dif, grad_path, grad_score
Author: chriscremer, Project: Other_Code, Lines: 57, Source: gmm_cleaned_v5.py


Example 6: argwhere_nonzero

def argwhere_nonzero(layer, batchnorm=False):
    indices = []
    # for batchnorms we want to do the opposite: collect the channels whose weights are all zero
    if batchnorm:
        for idx, w in enumerate(layer):
            if torch.sum(torch.abs(w)).data.cpu().numpy() == 0.:
                indices.append(idx)
    else:
        for idx, w in enumerate(layer):
            if torch.sum(torch.abs(w)).data.cpu().numpy() != 0.:
                indices.append(idx)

    return indices
Author: howtocodewang, Project: DeepCompression-PyTorch, Lines: 13, Source: utils.py
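When layer is a single weight tensor (rather than a generic iterable), the per-channel Python loop can be replaced by one reduction. A hedged sketch assuming a tensor input; the function name is illustrative, not from the project:

import torch

def argwhere_nonzero_vectorized(layer, batchnorm=False):
    # Sum |w| over every dim except the first (channel) dim, then threshold.
    sums = layer.abs().reshape(layer.shape[0], -1).sum(dim=1)
    mask = (sums == 0) if batchnorm else (sums != 0)
    return mask.nonzero(as_tuple=True)[0].tolist()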


Example 7: test_NormalQMCEngineShapiroInvTransform

    def test_NormalQMCEngineShapiroInvTransform(self):
        engine = NormalQMCEngine(d=2, seed=12345, inv_transform=True)
        samples = engine.draw(n=250)
        self.assertEqual(samples.dtype, torch.float)
        self.assertTrue(torch.all(torch.abs(samples.mean(dim=0)) < 1e-2))
        self.assertTrue(torch.all(torch.abs(samples.std(dim=0) - 1) < 1e-2))
        # perform Shapiro-Wilk test for normality
        for i in (0, 1):
            _, pval = shapiro(samples[:, i])
            self.assertGreater(pval, 0.9)
        # make sure samples are uncorrelated
        cov = np.cov(samples.numpy().transpose())
        self.assertLess(np.abs(cov[0, 1]), 1e-2)
Author: saschwan, Project: botorch, Lines: 13, Source: test_normal.py


Example 8: pairwise_distance

def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors v1,v2:

    .. math ::
        \Vert x \Vert _p := \left( \sum_{i=1}^n  \vert x_i \vert ^ p \right) ^ {1/p}

    Args:
        x1: first input tensor
        x2: second input tensor
        p: the norm degree. Default: 2
        eps (float, optional): Small value to avoid division by zero. Default: 1e-6

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.pairwise_distance(input1, input2, p=2)
        >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
Author: athiwatp, Project: pytorch, Lines: 29, Source: functional.py
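Current PyTorch releases expose this computation directly as torch.nn.functional.pairwise_distance (note: the built-in returns shape (N,) rather than (N, 1) unless keepdim=True is passed). A quick usage sketch, assuming a recent PyTorch install:

import torch
import torch.nn.functional as F

x1 = torch.randn(100, 128)
x2 = torch.randn(100, 128)
d = F.pairwise_distance(x1, x2, p=2, eps=1e-6)
print(d.shape)  # torch.Size([100])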


Example 9: _mu_law

    def _mu_law(self, x):
        m = self._variable(torch.FloatTensor(1))
        m[:] = self.n_categories + 1
        s = torch.sign(x)
        x = torch.abs(x)
        x = s * (torch.log(1 + (self.n_categories * x)) / torch.log(m))
        return x
Author: JohnVinyard, Project: zounds, Lines: 7, Source: sample_embedding.py
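The same companding curve as a standalone function, for readers without the surrounding class; a sketch assuming mu = n_categories, with torch.log1p used for better accuracy near zero (the name mu_law is illustrative):

import math
import torch

def mu_law(x, mu=255):
    # Compress x in [-1, 1] with the mu-law curve, preserving sign.
    return torch.sign(x) * torch.log1p(mu * torch.abs(x)) / math.log(1 + mu)

x = torch.linspace(-1, 1, 5)
print(mu_law(x))  # output stays within [-1, 1]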


Example 10: forward

    def forward(self, x1, x2):
        out1 = self.forward_one(x1)
        out2 = self.forward_one(x2)
        dis = torch.abs(out1 - out2)  # elementwise L1 distance between the two embeddings
        out = self.out(dis)
        #  return self.sigmoid(out)
        return out
Author: fangpin, Project: siamese-pytorch, Lines: 7, Source: model.py


Example 11: skewness_score

def skewness_score(x, dim=0):
    '''Test whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.
    Adapted from `scipy.stats.skewtest`.

    Args:
        x: Tensor of the sample data.
        dim: Dimension along which to compute the test. Default is 0.
            If None, compute over the whole tensor `x`.
    Returns:
        statistic: The computed z-score for this test.
        p-value: The two-sided p-value for the hypothesis test.
    '''
    x, n, dim = _x_n_dim(x, dim)
    b2 = (x**3).mean(dim) / (x**2).mean(dim)**1.5
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = 3.0 * (n**2 + 27 * n - 70) * (n + 1) * (n + 3) /\
        ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))
    W2 = -1.0 + math.sqrt(2 * (beta2 - 1))
    delta = 1.0 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    y[y == 0] = 1
    yalpha = y / alpha
    Z = delta * torch.log(yalpha + torch.sqrt(yalpha**2 + 1))
    return Z, 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))
Author: ModarTensai, Project: network_moments, Lines: 28, Source: stats.py
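Examples 1 and 11 both return the p-value as 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z)). That is exactly the standard two-sided p-value under the normal approximation, since \Phi(z) = \tfrac{1}{2}\bigl(1 + \operatorname{erf}(z/\sqrt{2})\bigr):

    p = 2\bigl(1 - \Phi(|Z|)\bigr)
      = 1 - \operatorname{erf}\!\bigl(|Z|/\sqrt{2}\bigr)
      = 1 + \operatorname{erf}\!\bigl(-\sqrt{0.5}\,|Z|\bigr)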


Example 12: forward

    def forward(self, frame, policies):
        # frame: [B,2,84,84]
        self.B = frame.size()[0]
        

        #Predict mask
        pre_mask = self.predict_mask_nosigmoid(frame)
        mask = F.sigmoid(pre_mask)

        masked_frame = frame * mask
        kls = []
        for i in range(len(policies)):
            policy = policies[i]

            log_dist_mask = policy.action_logdist(masked_frame)
            log_dist_true = policy.action_logdist(frame)

            action_dist_kl = torch.sum((log_dist_true - log_dist_mask)*torch.exp(log_dist_true), dim=1) #[B]
            action_dist_kl = torch.mean(action_dist_kl) # * 1000
            kls.append(action_dist_kl)

        kls = torch.stack(kls)  # [n_policies]
        action_dist_kl = torch.mean(kls)  # scalar: mean over batch and over policies

        pre_mask = pre_mask.view(self.B, -1)
        mask_cost = torch.abs(pre_mask + 20)
        # mask_sum = torch.mean(torch.sum(mask_cost, dim=1)) * .00001
        # mask_cost = torch.mean(mask_cost) * .00001
        mask_cost = torch.mean(mask_cost) * .01

        loss = action_dist_kl + mask_cost

        return loss, action_dist_kl, mask_cost
Author: chriscremer, Project: Other_Code, Lines: 33, Source: learn_to_mask_amortized_vae_policies.py


Example 13: test_train

    def test_train(self):
        self._metric.train()
        calls = [[torch.FloatTensor([0.0]), torch.LongTensor([0])],
                 [torch.FloatTensor([0.0, 0.1, 0.2, 0.3]), torch.LongTensor([0, 1, 2, 3])]]
        for i in range(len(self._states)):
            self._metric.process(self._states[i])
        self.assertEqual(2, len(self._metric_function.call_args_list))
        for i in range(len(self._metric_function.call_args_list)):
            self.assertTrue(torch.eq(self._metric_function.call_args_list[i][0][0], calls[i][0]).all())
            self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[i][0][1], -calls[i][1])), 1e-12).all())
        self._metric_function.reset_mock()
        self._metric.process_final({})

        self._metric_function.assert_called_once()
        self.assertTrue(torch.eq(self._metric_function.call_args_list[0][0][1], torch.LongTensor([0, 1, 2, 3, 4])).all())
        self.assertTrue(torch.lt(torch.abs(torch.add(self._metric_function.call_args_list[0][0][0], -torch.FloatTensor([0.0, 0.1, 0.2, 0.3, 0.4]))), 1e-12).all())
Author: little1tow, Project: torchbearer, Lines: 16, Source: test_wrappers.py


Example 14: test_mu_law_companding

    def test_mu_law_companding(self):

        sig = self.sig.clone()

        quantization_channels = 256
        sig = self.sig.numpy()
        sig = sig / np.abs(sig).max()
        self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)

        sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
        self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)

        sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
        self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)

        sig = self.sig.clone()
        sig = sig / torch.abs(sig).max()
        self.assertTrue(sig.min() >= -1. and sig.max() <= 1.)

        sig_mu = transforms.MuLawEncoding(quantization_channels)(sig)
        self.assertTrue(sig_mu.min() >= 0. and sig_mu.max() <= quantization_channels)

        sig_exp = transforms.MuLawExpanding(quantization_channels)(sig_mu)
        self.assertTrue(sig_exp.min() >= -1. and sig_exp.max() <= 1.)

        repr_test = transforms.MuLawEncoding(quantization_channels)
        repr_test.__repr__()
        repr_test = transforms.MuLawExpanding(quantization_channels)
        repr_test.__repr__()
Author: SsnL, Project: audio, Lines: 29, Source: test_transforms.py


Example 15: test_regularization

    def test_regularization(self):
        penalty = self.model.get_regularization_penalty().data
        assert (penalty > 0).all()

        penalty2 = 0

        # Config specifies penalty as
        #   "regularizer": [
        #     ["weight$", {"type": "l2", "alpha": 10}],
        #     ["bias$", {"type": "l1", "alpha": 5}]
        #   ]
        for name, parameter in self.model.named_parameters():
            if name.endswith("weight"):
                weight_penalty = 10 * torch.sum(torch.pow(parameter, 2))
                penalty2 += weight_penalty
            elif name.endswith("bias"):
                bias_penalty = 5 * torch.sum(torch.abs(parameter))
                penalty2 += bias_penalty

        assert (penalty == penalty2.data).all()

        # You get a RuntimeError if you call `model.forward` twice on the same inputs.
        # The data and config are such that the whole dataset is one batch.
        training_batch = next(self.iterator(self.instances, num_epochs=1))
        validation_batch = next(self.iterator(self.instances, num_epochs=1))

        training_loss = self.trainer._batch_loss(training_batch, for_training=True).data
        validation_loss = self.trainer._batch_loss(validation_batch, for_training=False).data

        # Training loss should have the regularization penalty, but validation loss should not.
        assert (training_loss != validation_loss).all()

        # Training loss should equal the validation loss plus the penalty.
        penalized = validation_loss + penalty
        assert (training_loss == penalized).all()
Author: pyknife, Project: allennlp, Lines: 35, Source: simple_tagger_test.py


Example 16: evalAccuracy

def evalAccuracy(model, device, args, outPutType, test_X, test_Y, validEval = False) :
    model_training_orig = model.training
    setModelMode(model, False)
    N_test = len(test_Y) * len(test_Y[0])
    totals = 0
    for b_data, b_labels in zip(test_X, test_Y):
        b_data = torch.from_numpy(b_data).to(device) 
        b_labels = torch.from_numpy(b_labels).to(device) 
        b_labels = b_labels.view(b_labels.shape[0], -1)  # make it the same shape as the output

        yhat = model(b_data)  # compute yhat again: this is the yhat AFTER updating the weights, not before as in the 'learn()' function
        b_data = None  # release the batch tensor
        
        # evaluate performance differently depending on whether this is a classification or regression problem
        if outPutType == OUT_REGRESSION:
            currentRate = torch_pearsonr(yhat.view(-1), b_labels.view(-1)) ** 2
            N_test = len(test_Y)  # when testing correlation, N refers to the number of batches, NOT the total number of observations
        elif outPutType == OUT_MULTICLASS:
            currentRate = calc_Accuracy(yhat, b_labels)  # calculate accuracy, this is ROUNDED
        else:  # mean absolute error
            currentRate = -torch.mean(torch.abs(yhat - b_labels))  # negated so that, like the other metrics, higher is better
            N_test = len(test_Y)  # N refers to the number of batches here as well
 
        currentRate = float(currentRate.detach().cpu().numpy())  # move it back to the CPU
        totals = totals + currentRate  # accumulate over all minibatches

    accuracy = round(float(totals) / N_test, 5)
    setModelMode(model, model_training_orig)
    return accuracy
Author: mkelcb, Project: knet, Lines: 29, Source: knet_main_pytorch.py


Example 17: test_autograd_closure

    def test_autograd_closure(self):
        x = Variable(torch.Tensor([0.4]), requires_grad=True)
        y = Variable(torch.Tensor([0.7]), requires_grad=True)

        trace = torch._C._tracer_enter((x, y), 1)

        z = torch.sigmoid(x * (x + y))
        w = torch.abs(x * x * x + y) + Variable(torch.ones(1))

        torch._C._tracer_exit((z, w))
        torch._C._jit_pass_lint(trace)

        (z * w).backward()
        torch._C._jit_pass_dce(trace)
        torch._C._jit_pass_lint(trace)

        x_grad = x.grad.data.clone()
        x.grad.data.zero_()

        function = torch._C._jit_createAutogradClosure(trace)
        torch._C._jit_pass_lint(trace)
        z2, w2 = function()(x, y)
        (z2 * w2).backward()
        self.assertEqual(z, z2)
        self.assertEqual(w, w2)
        self.assertEqual(x.grad.data, x_grad)
Author: Northrend, Project: pytorch, Lines: 26, Source: test_jit.py


Example 18: evaluate_post_training

    def evaluate_post_training(self, edp: EvaluationDataPage) -> CpeDetails:
        cpe_details = CpeDetails()

        self.score_cpe("Reward", edp, cpe_details.reward_estimates)

        if (
            self.metrics_to_score is not None
            and edp.logged_metrics is not None
            and self.action_names is not None
        ):
            for i, metric in enumerate(self.metrics_to_score):
                logger.info(
                    "--------- Running CPE on metric: {} ---------".format(metric)
                )

                metric_reward_edp = edp.set_metric_as_reward(i, len(self.action_names))

                cpe_details.metric_estimates[metric] = CpeEstimateSet()
                self.score_cpe(
                    metric, metric_reward_edp, cpe_details.metric_estimates[metric]
                )

        # Compute MC Loss on Aggregate Reward
        cpe_details.mc_loss = float(
            torch.mean(torch.abs(edp.logged_values - edp.model_values))
        )

        return cpe_details
Author: sra4077, Project: Horizon, Lines: 28, Source: evaluator.py


Example 19: apply_global_reward

    def apply_global_reward(self, rewards: torch.Tensor, next_iteration: int):
        std_dev = torch.std(rewards)
        if torch.abs(std_dev) > 1e-6:  # skip the update when rewards are (near-)constant
            normalized_rewards = (rewards - torch.mean(rewards)) / std_dev
            for parent_tensor in self.parent_tensors.values():
                parent_tensor.grad.zero_()
            for i, individual in enumerate(self.population_tensors):
                for tensor_name, parent_tensor in self.parent_tensors.items():
                    individual_tensor = individual[tensor_name]

                    # Subtract the parent to get the gradient estimate
                    individual_tensor.sub_(parent_tensor)

                    # Amplify the gradient by the reward
                    individual_tensor.mul_(normalized_rewards[i])

                    # Divide by a normalizing constant
                    individual_tensor.div_(
                        self.es_params.population_size
                        * self.es_params.mutation_power
                        * -1
                    )

                    parent_tensor.grad += individual_tensor
            self.optimizer.step()

        self.populate_children(next_iteration)
Author: sra4077, Project: Horizon, Lines: 27, Source: evolution_pool.py


Example 20: get_loss

    def get_loss(self, y_pred, y_true, X=None, training=False):
        y_true = to_tensor(y_true, device='cpu')
        loss_a = torch.abs(y_true.float() - y_pred[:, 1]).mean()    # mean absolute error term
        loss_b = ((y_true.float() - y_pred[:, 1]) ** 2).mean()      # mean squared error term
        if training:
            self.history.record_batch('loss_a', to_numpy(loss_a))
            self.history.record_batch('loss_b', to_numpy(loss_b))
        return loss_a + loss_b
Author: YangHaha11514, Project: skorch, Lines: 8, Source: test_net.py



Note: The torch.abs function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and distribution or use must follow each project's license. Please do not reproduce without permission.

