
Python torch.norm Function Code Examples


This article collects typical usage examples of the torch.norm function in Python. If you are wondering how to call torch.norm, what its arguments mean, or what real-world usage looks like, the curated code samples below should help.



Twenty code examples of the norm function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code samples.
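
Before diving into the collected examples, here is a minimal sketch of the most common torch.norm call patterns (whole-tensor L2 norm, per-row norms with keepdim=True, and unit-length row normalization). The tensor x and the variable names are illustrative only and do not come from any of the projects below; note that recent PyTorch releases recommend torch.linalg.norm / torch.linalg.vector_norm, while the examples in this article use the classic torch.norm interface.

import torch

x = torch.randn(4, 8)

# L2 (Frobenius) norm over all elements of the tensor
total = torch.norm(x)

# Per-row L2 norms; keepdim=True keeps the result broadcastable against x
row_norms = torch.norm(x, p=2, dim=1, keepdim=True)

# Normalize each row to unit length, guarding against division by zero
x_unit = x / row_norms.clamp(min=1e-12)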

Example 1: updateOutput

    def updateOutput(self, input):
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        if self._weightNorm is None:
            self._weightNorm = self.weight.new()
        if self._inputNorm is None:
            self._inputNorm = self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )

        torch.norm(self.weight, 2, 1, out=self._weightNorm, keepdim=True).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(input, 2, 1, out=self._inputNorm, keepdim=True).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output
Developer: Jsmilemsj, Project: pytorch, Lines of code: 27, Source: Cosine.py


Example 2: extract_feature

def extract_feature(model,dataloaders):
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        img, label = data
        n, c, h, w = img.size()
        count += n
        print(count)
        ff = torch.FloatTensor(n, 2048).zero_()
        for i in range(2):
            if(i==1):
                img = fliplr(img)
            input_img = Variable(img.cuda())
            outputs,f = model(input_img) 
            f = f.data.cpu()
            ff = ff+f
        # norm feature
        if opt.PCB:
            # feature size (n,2048,6)
            # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
            # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6) 
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), -1)
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))

        features = torch.cat((features,ff), 0)
    return features
Developer: XuJiaMing1997, Project: Person-reID-triplet-loss, Lines of code: 30, Source: test.py


Example 3: cosine_similarity

def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}

    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.cosine_similarity(input1, input2)
        >>> print(output)
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
Developer: athiwatp, Project: pytorch, Lines of code: 28, Source: functional.py


Example 4: angle_length_loss

    def angle_length_loss(y_pred, y_true, weights):
        y_true = y_true.permute(0, 2, 3, 1)
        y_pred = y_pred.permute(0, 2, 3, 1)
        weights = weights.permute(0, 2, 3, 1)

        # Single threshold

        # score_per_bundle = {}
        # bundles = ExpUtils.get_bundle_names(HP.CLASSES)[1:]

        nr_of_classes = int(y_true.shape[-1] / 3.)
        scores = torch.zeros(nr_of_classes)

        for idx in range(nr_of_classes):
            y_pred_bund = y_pred[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()
            y_true_bund = y_true[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()  # [x,y,z,3]
            weights_bund = weights[:, :, :, (idx * 3)].contiguous()  # [x,y,z]

            angles = PytorchUtils.angle_last_dim(y_pred_bund, y_true_bund)
            angles_weighted = angles / weights_bund
            #norm lengths to 0-1 to be more equal to angles?? -> peaks are already around 1 -> ok
            lengths = (torch.norm(y_pred_bund, 2., -1) - torch.norm(y_true_bund, 2, -1)) ** 2
            lengths_weighted = lengths * weights_bund

            # Divide by weights.max otherwise lens would be way bigger
            # Flip angles to make it a minimization problem
            combined = -angles_weighted + lengths_weighted / weights_bund.max()

            scores[idx] = torch.mean(combined)

        return torch.mean(scores)
Developer: doctoryfx, Project: TractSeg, Lines of code: 31, Source: PytorchUtils.py


Example 5: nn

 def nn(self, word, k):
     embedding = self.mu.weight.data.cpu() # [dict, embed_size]
     vector = embedding[self.dset.stoi[word], :].view(-1, 1) # [embed_size, 1]
     distance = torch.mm(embedding, vector).squeeze() / torch.norm(embedding, 2, 1)
     distance = distance / torch.norm(vector, 2, 0)[0]
     distance = distance.numpy()
     index = np.argsort(distance)[::-1][:k]  # indices of the k most similar words
     return [self.dset.itos[x] for x in index]
Developer: schelotto, Project: Gaussian_Word_Embedding, Lines of code: 8, Source: main.py


Example 6: torch_pearsonr

def torch_pearsonr(x, y):  # https://github.com/pytorch/pytorch/issues/1254
    mean_x = torch.mean(x)
    mean_y = torch.mean(y)
    xm = x.sub(mean_x)
    ym = y.sub(mean_y)
    r_num = xm.dot(ym)
    r_den = torch.norm(xm, 2) * torch.norm(ym, 2)
    r_val = r_num / r_den
    return r_val
Developer: mkelcb, Project: knet, Lines of code: 9, Source: knet_main_pytorch.py


Example 7: train_multilabel

def train_multilabel(features, targets, classes, train_split, test_split, C=1.0, ignore_hard_examples=True, after_ReLU=False, normalize_L2=False):
    print('\nHyperparameters:\n - C: {}\n - after_ReLU: {}\n - normL2: {}'.format(C, after_ReLU, normalize_L2))
    train_APs = []
    test_APs = []
    for class_id in range(len(classes)):
        
        classifier = SVC(C=C, kernel='linear') # http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
        
        if ignore_hard_examples:
            train_masks = (targets[train_split][:,class_id] != 0).view(-1, 1)
            train_features = torch.masked_select(features[train_split], train_masks.expand_as(features[train_split])).view(-1,features[train_split].size(1))
            train_targets = torch.masked_select(targets[train_split], train_masks.expand_as(targets[train_split])).view(-1,targets[train_split].size(1))
            test_masks = (targets[test_split][:,class_id] != 0).view(-1, 1)
            test_features = torch.masked_select(features[test_split], test_masks.expand_as(features[test_split])).view(-1,features[test_split].size(1))
            test_targets = torch.masked_select(targets[test_split], test_masks.expand_as(targets[test_split])).view(-1,targets[test_split].size(1))
        else:
            train_features = features[train_split]
            train_targets = targets[train_split]
            test_features = features[test_split]
            test_targets = targets[test_split]

        if after_ReLU:
            train_features[train_features < 0] = 0
            test_features[test_features < 0] = 0

        if normalize_L2:
            train_norm = torch.norm(train_features, p=2, dim=1).unsqueeze(1)
            train_features = train_features.div(train_norm.expand_as(train_features))
            test_norm = torch.norm(test_features, p=2, dim=1).unsqueeze(1)
            test_features = test_features.div(test_norm.expand_as(test_features))

        train_X = train_features.numpy()
        train_y = (train_targets[:,class_id] != -1).numpy() # uses hard examples if not ignored

        test_X = test_features.numpy()
        test_y = (test_targets[:,class_id] != -1).numpy()

        classifier.fit(train_X, train_y) # train parameters of the classifier

        train_preds = classifier.predict(train_X)
        train_acc = accuracy_score(train_y, train_preds) * 100
        train_AP = average_precision_score(train_y, train_preds) * 100
        train_APs.append(train_AP)

        test_preds = classifier.predict(test_X)
        test_acc = accuracy_score(test_y, test_preds) * 100
        test_AP = average_precision_score(test_y, test_preds) * 100
        test_APs.append(test_AP)

        print('class "{}" ({}/{}):'.format(classes[class_id], test_y.sum(), test_y.shape[0]))
        print('  - {:8}: acc {:.2f}, AP {:.2f}'.format(train_split, train_acc, train_AP))
        print('  - {:8}: acc {:.2f}, AP {:.2f}'.format(test_split, test_acc, test_AP))

    print('all classes:')
    print('  - {:8}: mAP {:.4f}'.format(train_split, sum(train_APs)/len(classes)))
    print('  - {:8}: mAP {:.4f}'.format(test_split, sum(test_APs)/len(classes)))
Developer: zbxzc35, Project: pretrained-models.pytorch, Lines of code: 56, Source: voc2007_extract.py


Example 8: getM

def getM(mods):
    for m in mods:
        if isinstance(m, legacy.nn.SpatialConvolution):
            m.gradWeight[m.gradWeight.ne(m.gradWeight)] = 0
            l.append(torch.norm(m.gradWeight))
        elif isinstance(m, legacy.nn.Linear):
            l.append(torch.norm(m.gradWeight))
        elif isinstance(m, legacy.nn.Concat) or \
             isinstance(m, legacy.nn.Sequential):
            getM(m.modules)
Developer: shubhampachori12110095, Project: densenet.pytorch, Lines of code: 10, Source: compare-pytorch-and-torch-grads.py


Example 9: test_importance_prior

 def test_importance_prior(self):
     posterior = pyro.infer.Importance(self.model, guide=None, num_samples=10000)
     marginal = pyro.infer.Marginal(posterior)
     posterior_samples = [marginal() for i in range(1000)]
     posterior_mean = torch.mean(torch.cat(posterior_samples))
     posterior_stddev = torch.std(torch.cat(posterior_samples), 0)
     self.assertEqual(0, torch.norm(posterior_mean - self.mu_mean).data[0],
                      prec=0.01)
     self.assertEqual(0, torch.norm(posterior_stddev - self.mu_stddev).data[0],
                      prec=0.1)
Developer: Magica-Chen, Project: pyro, Lines of code: 10, Source: test_sampling.py


Example 10: calc_peak_length_dice_pytorch

    def calc_peak_length_dice_pytorch(HP, y_pred, y_true, max_angle_error=[0.9], max_length_error=0.1):
        '''
        Calculate a dice (F1) score per bundle, counting a voxel as correct if both its peak angle error and its length error are below the given thresholds.

        :param y_pred:
        :param y_true:
        :param max_angle_error:  0.7 ->  angle error of 45° or less; 0.9 ->  angle error of 23° or less
                                 Can be list with several values -> calculate for several thresholds
        :return:
        '''
        import torch
        from tractseg.libs.PytorchEinsum import einsum
        from tractseg.libs.PytorchUtils import PytorchUtils

        y_true = y_true.permute(0, 2, 3, 1)
        y_pred = y_pred.permute(0, 2, 3, 1)

        def angle_last_dim(a, b):
            '''
            Calculate the angle between two nd-arrays (array of vectors) along the last dimension

            without anything further: 1->0°, 0.9->23°, 0.7->45°, 0->90°
            np.arccos -> returns angle in radians (90°: 0.5*pi)

            return: one dimension less than the input
            '''
            return torch.abs(einsum('abcd,abcd->abc', a, b) / (torch.norm(a, 2., -1) * torch.norm(b, 2, -1) + 1e-7))

        #Single threshold
        score_per_bundle = {}
        bundles = ExpUtils.get_bundle_names(HP.CLASSES)[1:]
        for idx, bundle in enumerate(bundles):
            # if bundle == "CST_right":
            y_pred_bund = y_pred[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()
            y_true_bund = y_true[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()      # [x,y,z,3]

            angles = angle_last_dim(y_pred_bund, y_true_bund)

            lengths_pred = torch.norm(y_pred_bund, 2., -1)
            lengths_true = torch.norm(y_true_bund, 2, -1)
            lengths_binary = torch.abs(lengths_pred - lengths_true) < (max_length_error * lengths_true)
            lengths_binary = lengths_binary.view(-1)

            gt_binary = y_true_bund.sum(dim=-1) > 0
            gt_binary = gt_binary.view(-1)  # [bs*x*y]

            angles_binary = angles > max_angle_error[0]
            angles_binary = angles_binary.view(-1)

            combined = lengths_binary * angles_binary

            f1 = PytorchUtils.f1_score_binary(gt_binary, combined)
            score_per_bundle[bundle] = f1
        return score_per_bundle
Developer: doctoryfx, Project: TractSeg, Lines of code: 54, Source: MetricUtils.py


Example 11: triplet_loss

 def triplet_loss(self, z_p, z_n, z_d, margin=0.1, l2=0):
     l_n = torch.sqrt(((z_p - z_n) ** 2).sum(dim=1))
     l_d = - torch.sqrt(((z_p - z_d) ** 2).sum(dim=1))
     l_nd = l_n + l_d
     loss = F.relu(l_n + l_d + margin)
     l_n = torch.mean(l_n)
     l_d = torch.mean(l_d)
     l_nd = torch.mean(l_n + l_d)
     loss = torch.mean(loss)
     if l2 != 0:
         loss += l2 * (torch.norm(z_p) + torch.norm(z_n) + torch.norm(z_d))
     return loss, l_n, l_d, l_nd
Developer: ml-lab, Project: tile2vec, Lines of code: 12, Source: tilenet.py


Example 12: th_pearsonr

def th_pearsonr(x, y):
    """
    mimics scipy.stats.pearsonr
    """
    mean_x = th.mean(x)
    mean_y = th.mean(y)
    xm = x.sub(mean_x)
    ym = y.sub(mean_y)
    r_num = xm.dot(ym)
    r_den = th.norm(xm, 2) * th.norm(ym, 2)
    r_val = r_num / r_den
    return r_val
Developer: BrianDo2005, Project: torchsample, Lines of code: 12, Source: utils.py


Example 13: angle_last_dim

    def angle_last_dim(a, b):
        '''
        Calculate the angle between two nd-arrays (array of vectors) along the last dimension

        without anything further: 1->0°, 0.9->23°, 0.7->45°, 0->90°
        np.arccos -> returns angle in radians (90°: 0.5*pi)

        return: one dimension less than the input
        '''
        from tractseg.libs.PytorchEinsum import einsum

        return torch.abs(einsum('abcd,abcd->abc', a, b) / (torch.norm(a, 2., -1) * torch.norm(b, 2, -1) + 1e-7))
Developer: doctoryfx, Project: TractSeg, Lines of code: 12, Source: PytorchUtils.py


Example 14: _transform

    def _transform(self, x):
        bands = self.dct_transform.frequency_decomposition(
            x, self.factors, axis=-1)

        norms = [torch.norm(b, dim=-1, keepdim=True) for b in bands]
        bands = [b / (n + 1e-8) for (b, n) in zip(bands, norms)]
        fine = torch.cat(bands, dim=-1)

        coarse = torch.cat(norms, dim=-1)
        coarse_norms = torch.norm(coarse, dim=-1, keepdim=True)
        coarse = coarse / (coarse_norms + 1e-8)

        return fine, coarse
Developer: JohnVinyard, Project: zounds, Lines of code: 13, Source: loss.py


Example 15: distance_calcu

 def distance_calcu(self, gallery, query):
     # x = torch.autograd.Variable(torch.cat([gallery,query]))
     gallery = gallery.expand_as(query).contiguous()
     x = torch.cat([gallery, query],1)
     W = self.adpW(x)
     num = query.size(0)
     dist1 = torch.norm(torch.matmul(W,gallery.view(num,-1,1))
                       -torch.matmul(W,query.view(num,-1,1)),2,1)
     x = torch.cat([query,gallery],1)
     W = self.adpW(x)
     dist2 = torch.norm(torch.matmul(W, gallery.view(num, -1, 1))
                       - torch.matmul(W, query.view(num, -1, 1)), 2, 1)
     dist = 0.5*(dist1+dist2)
     return dist
Developer: hh23333, Project: FVAE_adversarial, Lines of code: 14, Source: adaptive_triplet_onlyadp.py


Example 16: angle_second_dim

    def angle_second_dim(a, b):
        '''
        Not working !
        RuntimeError: invalid argument 2: input is not contiguous (and

        Calculate the angle between two nd-arrays (array of vectors) along the second dimension

        without anything further: 1->0°, 0.9->23°, 0.7->45°, 0->90°
        np.arccos -> returns angle in radians (90°: 0.5*pi)

        return: one dimension less than the input
        '''
        from tractseg.libs.PytorchEinsum import einsum

        return torch.abs(einsum('abcd,abcd->acd', a, b) / (torch.norm(a, 2., 1) * torch.norm(b, 2, 1) + 1e-7))
Developer: doctoryfx, Project: TractSeg, Lines of code: 15, Source: PytorchUtils.py


Example 17: calculate_distance_term

def calculate_distance_term(means, n_objects, delta_d, norm=2, usegpu=True):
    """means: bs, n_instances, n_filters"""

    bs, n_instances, n_filters = means.size()

    dist_term = 0.0
    for i in range(bs):
        _n_objects_sample = n_objects[i]

        if _n_objects_sample <= 1:
            continue

        _mean_sample = means[i, : _n_objects_sample, :]  # n_objects, n_filters
        means_1 = _mean_sample.unsqueeze(1).expand(
            _n_objects_sample, _n_objects_sample, n_filters)
        means_2 = means_1.permute(1, 0, 2)

        diff = means_1 - means_2  # n_objects, n_objects, n_filters

        _norm = torch.norm(diff, norm, 2)

        margin = 2 * delta_d * (1.0 - torch.eye(_n_objects_sample))
        if usegpu:
            margin = margin.cuda()
        margin = Variable(margin)

        _dist_term_sample = torch.sum(
            torch.clamp(margin - _norm, min=0.0) ** 2)
        _dist_term_sample = _dist_term_sample / \
            (_n_objects_sample * (_n_objects_sample - 1))
        dist_term += _dist_term_sample

    dist_term = dist_term / bs

    return dist_term
Developer: davnov134, Project: instance-segmentation-pytorch, Lines of code: 35, Source: discriminative.py


Example 18: calculate_variance_term

def calculate_variance_term(pred, gt, means, n_objects, delta_v, norm=2):
    """pred: bs, height * width, n_filters
       gt: bs, height * width, n_instances
       means: bs, n_instances, n_filters"""

    bs, n_loc, n_filters = pred.size()
    n_instances = gt.size(2)

    # bs, n_loc, n_instances, n_filters
    means = means.unsqueeze(1).expand(bs, n_loc, n_instances, n_filters)
    # bs, n_loc, n_instances, n_filters
    pred = pred.unsqueeze(2).expand(bs, n_loc, n_instances, n_filters)
    # bs, n_loc, n_instances, n_filters
    gt = gt.unsqueeze(3).expand(bs, n_loc, n_instances, n_filters)

    _var = (torch.clamp(torch.norm((pred - means), norm, 3) -
                        delta_v, min=0.0) ** 2) * gt[:, :, :, 0]

    var_term = 0.0
    for i in range(bs):
        _var_sample = _var[i, :, :n_objects[i]]  # n_loc, n_objects
        _gt_sample = gt[i, :, :n_objects[i], 0]  # n_loc, n_objects

        var_term += torch.sum(_var_sample) / torch.sum(_gt_sample)
    var_term = var_term / bs

    return var_term
Developer: davnov134, Project: instance-segmentation-pytorch, Lines of code: 27, Source: discriminative.py


Example 19: weighted_mpjpe

def weighted_mpjpe(predicted, target, w):
    """
    Weighted mean per-joint position error (i.e. mean Euclidean distance)
    """
    assert predicted.shape == target.shape
    assert w.shape[0] == predicted.shape[0]
    return torch.mean(w * torch.norm(predicted - target, dim=len(target.shape)-1))
Developer: HenrryBryant, Project: VideoPose3D, Lines of code: 7, Source: loss.py


Example 20: encode

    def encode(self, indices, lengths, noise):
        embeddings = self.embedding(indices)
        packed_embeddings = pack_padded_sequence(input=embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        # Encode
        packed_output, state = self.encoder(packed_embeddings)

        hidden, cell = state
        # batch_size x nhidden
        hidden = hidden[-1]  # get hidden state of last layer of encoder

        # normalize to unit ball (l2 norm of 1) - p=2, dim=1
        norms = torch.norm(hidden, 2, 1)
        
        # For older versions of PyTorch use:
        hidden = torch.div(hidden, norms.expand_as(hidden))
        # For newest version of PyTorch (as of 8/25) use this:
        # hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))

        if noise and self.noise_radius > 0:
            gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
                                       std=self.noise_radius)
            hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))

        return hidden
Developer: wangwang110, Project: ARAE, Lines of code: 27, Source: models.py



Note: The torch.norm examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.

