• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python torch.eye函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中torch.eye函数的典型用法代码示例。如果您正苦于以下问题:Python eye函数的具体用法?Python eye怎么用?Python eye使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了eye函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: model

    def model(self):
        """Define the generative model for variational GP inference.

        Samples the latent function ``f`` at the training inputs ``X`` from a
        multivariate-normal prior and returns either the latent predictions
        (when ``self.y is None``) or the likelihood of the observed targets.
        """
        self.set_mode("model")

        f_loc = self.get_param("f_loc")
        f_scale_tril = self.get_param("f_scale_tril")

        N = self.X.shape[0]
        # Kernel matrix over the training inputs; jitter on the diagonal keeps
        # the Cholesky factorization numerically stable.
        Kff = self.kernel(self.X) + (torch.eye(N, out=self.X.new_empty(N, N)) *
                                     self.jitter)
        Lff = Kff.potrf(upper=False)  # lower-triangular Cholesky factor of Kff

        zero_loc = self.X.new_zeros(f_loc.shape)
        f_name = param_with_module_name(self.name, "f")

        if self.whiten:
            # Whitened parameterization: sample from N(0, I), then fold Lff
            # into the variational scale below.
            Id = torch.eye(N, out=self.X.new_empty(N, N))
            pyro.sample(f_name,
                        dist.MultivariateNormal(zero_loc, scale_tril=Id)
                            .independent(zero_loc.dim() - 1))
            f_scale_tril = Lff.matmul(f_scale_tril)
        else:
            pyro.sample(f_name,
                        dist.MultivariateNormal(zero_loc, scale_tril=Lff)
                            .independent(zero_loc.dim() - 1))

        # Marginal variance of f: row-wise squared norm of the scale factor.
        f_var = f_scale_tril.pow(2).sum(dim=-1)

        if self.whiten:
            # Un-whiten the variational mean.
            f_loc = Lff.matmul(f_loc.unsqueeze(-1)).squeeze(-1)
        f_loc = f_loc + self.mean_function(self.X)
        if self.y is None:
            return f_loc, f_var
        else:
            return self.likelihood(f_loc, f_var, self.y)
开发者ID:lewisKit,项目名称:pyro,代码行数:34,代码来源:vgp.py


示例2: model

    def model(self):
        """Generative model for the variational sparse GP.

        Samples the inducing-point outputs ``u`` from their prior, computes
        the conditional ``f | u`` at the training inputs, and returns either
        the latent predictions (when ``self.y is None``) or the likelihood of
        the observed targets, rescaled for minibatch training.
        """
        self.set_mode("model")

        Xu = self.get_param("Xu")
        u_loc = self.get_param("u_loc")
        u_scale_tril = self.get_param("u_scale_tril")

        M = Xu.shape[0]
        # Kernel over the inducing inputs; jitter keeps the Cholesky stable.
        Kuu = self.kernel(Xu) + torch.eye(M, out=Xu.new_empty(M, M)) * self.jitter
        Luu = Kuu.potrf(upper=False)  # lower-triangular Cholesky factor

        zero_loc = Xu.new_zeros(u_loc.shape)
        u_name = param_with_module_name(self.name, "u")
        if self.whiten:
            # Whitened parameterization: the prior on u is N(0, I).
            Id = torch.eye(M, out=Xu.new_empty(M, M))
            pyro.sample(u_name,
                        dist.MultivariateNormal(zero_loc, scale_tril=Id)
                            .independent(zero_loc.dim() - 1))
        else:
            pyro.sample(u_name,
                        dist.MultivariateNormal(zero_loc, scale_tril=Luu)
                            .independent(zero_loc.dim() - 1))

        # Mean and diagonal variance of f at X given the inducing variables.
        f_loc, f_var = conditional(self.X, Xu, self.kernel, u_loc, u_scale_tril,
                                   Luu, full_cov=False, whiten=self.whiten,
                                   jitter=self.jitter)

        f_loc = f_loc + self.mean_function(self.X)
        if self.y is None:
            return f_loc, f_var
        else:
            # Rescale so a minibatch contributes as if it were the whole dataset.
            with poutine.scale(None, self.num_data / self.X.shape[0]):
                return self.likelihood(f_loc, f_var, self.y)
开发者ID:lewisKit,项目名称:pyro,代码行数:33,代码来源:vsgp.py


示例3: __init__

 def __init__(self):
     """Build three vocab-sized linear layers, each with identity-initialized weights."""
     super(Tune, self).__init__()
     vocab_size = len(TEXT.vocab)
     identity = torch.eye(vocab_size)
     # Create linear1, linear2 and linear3, all initialized to the identity map.
     for index in (1, 2, 3):
         layer = nn.Linear(vocab_size, vocab_size)
         layer.weight.data.copy_(identity)
         setattr(self, "linear%d" % index, layer)
开发者ID:anihamde,项目名称:cs287-s18,代码行数:8,代码来源:ensemble.py


示例4: enumerate_support

 def enumerate_support(self):
     """Return every one-hot vector in the support, expanded over batch dims.

     Builds an ``n x n`` identity matrix (each row is one one-hot outcome)
     and reshapes it to ``(n,) + batch_shape + (n,)`` so the leading
     dimension enumerates the ``n`` possible outcomes.
     """
     probs = self._categorical.probs
     n = self.event_shape[0]
     # Legacy torch: wrap in Variable when probs is a Variable so the result
     # matches the input's autograd type; `new(n, n)` allocates storage with
     # the same type/device as probs.
     if isinstance(probs, Variable):
         values = Variable(torch.eye(n, out=probs.data.new(n, n)))
     else:
         values = torch.eye(n, out=probs.new(n, n))
     # Insert a singleton dim per batch dimension, then expand (no copy).
     values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
     return values.expand((n,) + self.batch_shape + (n,))
开发者ID:lxlhh,项目名称:pytorch,代码行数:9,代码来源:one_hot_categorical.py


示例5: test_forward

 def test_forward(self):
     """With identity projections, the similarity reduces to per-head dot products."""
     # pylint: disable=protected-access
     similarity = MultiHeadedSimilarity(num_heads=3, tensor_1_dim=6)
     # Identity projections make each head see the raw 2-dim slices.
     similarity._tensor_1_projection = Parameter(torch.eye(6))
     similarity._tensor_2_projection = Parameter(torch.eye(6))
     first_vectors = Variable(torch.FloatTensor([[[[1, 1, -1, -1, 0, 1], [-2, 5, 9, -1, 3, 4]]]]))
     second_vectors = Variable(torch.FloatTensor([[[[1, 1, 1, 0, 2, 5], [0, 1, -1, -7, 1, 2]]]]))
     output = similarity(first_vectors, second_vectors).data.numpy()
     # One similarity score per head, for each of the two vector pairs.
     assert output.shape == (1, 1, 2, 3)
     assert_almost_equal(output, [[[[2, -1, 5], [5, -2, 11]]]])
开发者ID:Jordan-Sauchuk,项目名称:allennlp,代码行数:10,代码来源:multiheaded_test.py


示例6: btriunpack

def btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
    r"""Unpacks the data and pivots from a batched LU factorization (btrifact) of a tensor.

    Returns a tuple indexed by:
      0: The pivots.
      1: The L tensor.
      2: The U tensor.

    Arguments:
        LU_data (Tensor): the packed LU factorization data
        LU_pivots (Tensor): the packed LU factorization pivots
        unpack_data (bool): flag indicating if the data should be unpacked
        unpack_pivots (bool): flag indicating if the pivots should be unpacked

    Example::

        >>> A = torch.randn(2, 3, 3)
        >>> A_LU, pivots = A.btrifact()
        >>> P, A_L, A_U = torch.btriunpack(A_LU, pivots)
        >>>
        >>> # test that (P, A_L, A_U) gives LU factorization
        >>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
        >>> assert torch.equal(A_, A) == True  # can recover A
    """

    nBatch, sz, _ = LU_data.size()

    if unpack_data:
        # Byte masks: I_U selects the upper triangle (including the diagonal),
        # I_L its complement (the strictly lower triangle).
        I_U = torch.triu(torch.ones(sz, sz)).type_as(LU_data).byte().unsqueeze(0).expand(nBatch, sz, sz)
        I_L = 1 - I_U
        L = LU_data.new(LU_data.size()).zero_()
        U = LU_data.new(LU_data.size()).zero_()
        I_diag = torch.eye(sz).type_as(LU_data).byte().unsqueeze(0).expand(nBatch, sz, sz)
        # L is unit lower triangular: ones on the diagonal, packed data below.
        L[I_diag] = 1.0
        L[I_L] = LU_data[I_L]
        U[I_U] = LU_data[I_U]
    else:
        L = U = None

    if unpack_pivots:
        # Reconstruct the permutation matrix by replaying the column swaps
        # recorded in LU_pivots (which uses 1-based indices).
        P = torch.eye(sz).type_as(LU_data).unsqueeze(0).repeat(nBatch, 1, 1)
        for i in range(nBatch):
            for j in range(sz):
                k = LU_pivots[i, j] - 1
                t = P[i, :, j].clone()
                P[i, :, j] = P[i, :, k]
                P[i, :, k] = t
    else:
        P = None

    return P, L, U
开发者ID:lxlhh,项目名称:pytorch,代码行数:51,代码来源:functional.py


示例7: read_test

def read_test(memory):
    """Smoke-test memory.read() with an all-ones key and a one-hot strength vector."""
    print("Memory Reading Test: ")
    key = T.ones(1, M_DIM * Kr)
    strength = T.eye(Kr)[0].view(1, -1)
    print("k tensor: ", key)
    print("b tensor: ", strength)
    print(memory.read(key, strength))
开发者ID:andreofner,项目名称:MERLIN,代码行数:7,代码来源:memory_test.py


示例8: __init__

    def __init__(self, X, y, kernel, Xu, likelihood, mean_function=None,
                 latent_shape=None, num_data=None, whiten=False, jitter=1e-6,
                 name="SVGP"):
        """Variational sparse Gaussian process model.

        :param X: training inputs.
        :param y: training targets, or None for a latent-only model.
        :param kernel: covariance kernel.
        :param Xu: initial inducing-point locations; optimized as a Parameter.
        :param likelihood: likelihood linking f to y.
        :param mean_function: optional prior mean function.
        :param latent_shape: batch shape of the latent function; defaults to
            the batch shape of ``y``.
        :param num_data: full dataset size used to rescale minibatch
            likelihoods; defaults to ``X.shape[0]``.
        :param whiten: whether to use the whitened parameterization of ``u``.
        :param jitter: diagonal jitter for numerical stability.
        :param name: module name.
        """
        super(VariationalSparseGP, self).__init__(X, y, kernel, mean_function, jitter,
                                                  name)
        self.likelihood = likelihood

        self.num_data = num_data if num_data is not None else self.X.shape[0]
        self.whiten = whiten

        self.Xu = Parameter(Xu)

        # Latent shape defaults to y's batch shape (all dims but the last).
        y_batch_shape = self.y.shape[:-1] if self.y is not None else torch.Size([])
        self.latent_shape = latent_shape if latent_shape is not None else y_batch_shape

        M = self.Xu.shape[0]
        u_loc_shape = self.latent_shape + (M,)
        u_loc = self.Xu.new_zeros(u_loc_shape)
        self.u_loc = Parameter(u_loc)

        # Variational scale_tril starts as the identity and is constrained to
        # stay a valid lower-Cholesky factor during optimization.
        u_scale_tril_shape = self.latent_shape + (M, M)
        Id = torch.eye(M, out=self.Xu.new_empty(M, M))
        u_scale_tril = Id.expand(u_scale_tril_shape)
        self.u_scale_tril = Parameter(u_scale_tril)
        self.set_constraint("u_scale_tril", constraints.lower_cholesky)

        self._sample_latent = True
开发者ID:lewisKit,项目名称:pyro,代码行数:27,代码来源:vsgp.py


示例9: create_input

def create_input(points, sigma2):
    """Build the 4-channel graph-operator input for a batch of point clouds.

    Returns ``((OP, x, Y), dists)`` where OP stacks, per channel: the
    identity E, the Gaussian affinity W, the degree matrix D, and the
    uniform averaging matrix U = 1/N.

    NOTE(review): relies on module-level globals ``dtype``, ``dim`` and
    ``normalize`` — confirm they are defined where this function is used.
    """
    bs, N, _ = points.size() #points has size bs,N,2
    OP = torch.zeros(bs,N,N,4).type(dtype)
    E = torch.eye(N).type(dtype).unsqueeze(0).expand(bs,N,N)
    OP[:,:,:,0] = E
    # Pairwise coordinate differences -> squared distances -> Gaussian kernel.
    W = points.unsqueeze(1).expand(bs,N,N,dim) - points.unsqueeze(2).expand(bs,N,N,dim)
    dists2 = (W * W).sum(3)
    dists = torch.sqrt(dists2)
    W = torch.exp(-dists2 / sigma2)
    OP[:,:,:,1] = W
    # Degree matrix: row sums of W placed on the diagonal.
    D = E * W.sum(2,True).expand(bs,N,N)
    OP[:,:,:,2] = D
    U = (torch.ones(N,N).type(dtype)/N).unsqueeze(0).expand(bs,N,N)
    OP[:,:,:,3] = U
    OP = Variable(OP)
    x = Variable(points)
    Y = Variable(W.clone())

    # Normalize inputs
    if normalize:
        # Center each sample, then scale by 10x its per-coordinate variance.
        mu = x.sum(1)/N
        mu_ext = mu.unsqueeze(1).expand_as(x)
        var = ((x - mu_ext)*(x - mu_ext)).sum(1)/N
        var_ext = var.unsqueeze(1).expand_as(x)
        x = x - mu_ext
        x = x/(10 * var_ext)

    return (OP, x, Y), dists
开发者ID:ParsonsZeng,项目名称:DiCoNet,代码行数:28,代码来源:kmeans.py


示例10: calculate_distance_term

def calculate_distance_term(means, n_objects, delta_d, norm=2, usegpu=True):
    """means: bs, n_instances, n_filters

    Hinged inter-cluster push term: penalizes pairs of instance means that
    are closer than ``2 * delta_d``, averaged over pairs and over the batch.
    """

    bs, n_instances, n_filters = means.size()

    total = 0.0
    for sample_idx in range(bs):
        count = n_objects[sample_idx]

        # One instance (or none) yields no pairwise distances to penalize.
        if count <= 1:
            continue

        sample_means = means[sample_idx, :count, :]  # count x n_filters
        expanded_a = sample_means.unsqueeze(1).expand(count, count, n_filters)
        expanded_b = expanded_a.permute(1, 0, 2)

        # Pairwise distances between instance means: count x count.
        pairwise = torch.norm(expanded_a - expanded_b, norm, 2)

        # Margin is 2*delta_d off the diagonal, 0 on it (self-pairs ignored).
        hinge = 2 * delta_d * (1.0 - torch.eye(count))
        if usegpu:
            hinge = hinge.cuda()
        hinge = Variable(hinge)

        penalty = torch.sum(torch.clamp(hinge - pairwise, min=0.0) ** 2)
        total += penalty / (count * (count - 1))

    return total / bs
开发者ID:davnov134,项目名称:instance-segmentation-pytorch,代码行数:35,代码来源:discriminative.py


示例11: _compute_logdet_and_mahalanobis

    def _compute_logdet_and_mahalanobis(self, D, W, y, trace_term=0):
        """
        Calculates log determinant and (squared) Mahalanobis term of covariance
        matrix ``(D + Wt.W)``, where ``D`` is a diagonal matrix, based on the
        "Woodbury matrix identity" and "matrix determinant lemma"::

            inv(D + Wt.W) = inv(D) - inv(D).Wt.inv(I + W.inv(D).Wt).W.inv(D)
            log|D + Wt.W| = log|Id + Wt.inv(D).W| + log|D|
        """
        W_Dinv = W / D  # W.inv(D): D holds the diagonal entries, broadcast over rows
        M = W.shape[0]
        Id = torch.eye(M, M, out=W.new_empty(M, M))
        # Small (M x M) capacitance matrix K = I + W.inv(D).Wt from Woodbury.
        K = Id + W_Dinv.matmul(W.t())
        L = K.potrf(upper=False)  # lower-triangular Cholesky factor of K
        if y.dim() == 1:
            W_Dinv_y = W_Dinv.matmul(y)
        elif y.dim() == 2:
            # Batched case: operate on columns, transpose back afterwards.
            W_Dinv_y = W_Dinv.matmul(y.t())
        else:
            raise NotImplementedError("SparseMultivariateNormal distribution does not support "
                                      "computing log_prob for a tensor with more than 2 dimensionals.")
        Linv_W_Dinv_y = matrix_triangular_solve_compat(W_Dinv_y, L, upper=False)
        if y.dim() == 2:
            Linv_W_Dinv_y = Linv_W_Dinv_y.t()

        # log|D + Wt.W| = 2 * sum(log diag(L)) + sum(log D)  (determinant lemma).
        logdet = 2 * L.diag().log().sum() + D.log().sum()

        # Mahalanobis term via Woodbury: yt.inv(D).y - ||inv(L).W.inv(D).y||^2.
        mahalanobis1 = (y * y / D).sum(-1)
        mahalanobis2 = (Linv_W_Dinv_y * Linv_W_Dinv_y).sum(-1)
        mahalanobis_squared = mahalanobis1 - mahalanobis2 + trace_term

        return logdet, mahalanobis_squared
开发者ID:lewisKit,项目名称:pyro,代码行数:32,代码来源:lowrank_mvn.py


示例12: test_forward_backward

 def test_forward_backward(self):
     """OIMLoss gradient check.

     With an identity lookup table the logits equal the inputs, so the
     gradient of the summed loss w.r.t. x must be softmax(x) - onehot(y).
     """
     import torch
     import torch.nn.functional as F
     from torch.autograd import Variable
     from reid.loss import OIMLoss
     criterion = OIMLoss(3, 3, scalar=1.0, size_average=False)
     # Identity LUT: logits == inputs.
     criterion.lut = torch.eye(3)
     x = Variable(torch.randn(3, 3), requires_grad=True)
     y = Variable(torch.range(0, 2).long())  # labels 0, 1, 2
     loss = criterion(x, y)
     loss.backward()
     probs = F.softmax(x)
     # Expected cross-entropy gradient: softmax minus one-hot targets.
     grads = probs.data - torch.eye(3)
     abs_diff = torch.abs(grads - x.grad.data)
     # Loss must equal -sum(log prob of the correct class).
     self.assertEquals(torch.log(probs).diag().sum(), -loss)
     self.assertTrue(torch.max(abs_diff) < 1e-6)
开发者ID:DianJin2018,项目名称:open-reid,代码行数:16,代码来源:test_oim.py


示例13: torch_eye

def torch_eye(n, m=None, out=None):
    """
    Like `torch.eye()`, but works with cuda tensors.

    :param n: number of rows.
    :param m: number of columns; defaults to ``n``.
    :param out: optional output tensor; may be a cuda tensor even on torch
        versions where ``torch.eye`` rejects cuda ``out`` arguments.
    :returns: an ``n x m`` matrix with ones on the main diagonal.
    """
    if m is None:
        m = n
    try:
        return torch.eye(n, m, out=out)
    except TypeError:
        # Only catch errors due to torch.eye() not being available for cuda tensors.
        module = torch.Tensor.__module__ if out is None else type(out).__module__
        if module != 'torch.cuda':
            raise
    # Fallback: build the identity on CPU, then move it to the gpu.
    Tensor = getattr(torch, torch.Tensor.__name__)
    cpu_out = Tensor(n, m)
    # BUGFIX: the arguments were previously swapped (torch.eye(m, n, ...)),
    # which produced an m x n matrix instead of n x m whenever n != m.
    cuda_out = torch.eye(n, m, out=cpu_out).cuda()
    return cuda_out if out is None else out.copy_(cuda_out)
开发者ID:Magica-Chen,项目名称:pyro,代码行数:17,代码来源:util.py


示例14: get_cat_mapping

def get_cat_mapping(model: infogan.InfoGAN, data_loader: DataLoader):
    """Map the InfoGAN categorical code indices to ground-truth labels.

    Accumulates a 10x10 confusion matrix between true labels and the argmax
    of the recognition network's categorical logits, then returns, for each
    code index, the most frequently co-occurring true label.

    NOTE(review): assumes raw images in [0, 255] without a channel dim and
    exactly 10 classes — confirm against the data loader.
    """
    eye = torch.eye(10)
    confusion = torch.zeros(10, 10)
    for data, labels in data_loader:
        # Add a channel dimension and scale pixel values to [0, 1].
        real_data = data.to(model.device).unsqueeze(1).float() / 255.
        cat_logits = model.rec(model.dis(real_data)[1])[0]
        # onehot(labels)^T @ onehot(pred) counts (label, predicted-code) pairs.
        confusion += eye[labels.long()].t() @ eye[cat_logits.cpu().argmax(1)]
    return confusion.argmax(0).numpy()
开发者ID:dccastro,项目名称:Morpho-MNIST,代码行数:8,代码来源:train_infogan.py


示例15: check

    def check(self, value):
        """Return a mask marking which matrices in the batch are valid
        lower-Cholesky factors: lower triangular with a positive diagonal.
        """
        value_tril = batch_tril(value)
        # Lower triangular iff zeroing the upper triangle changes nothing.
        lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]

        n = value.size(-1)
        diag_mask = torch.eye(n, n, out=value.new(n, n))
        # Off-diagonal entries give 0 > -1 (always true), so the min over the
        # matrix is decided by the diagonal entries, which must be > 0.
        positive_diagonal = (value * diag_mask > (diag_mask - 1)).min(-1)[0].min(-1)[0]
        return lower_triangular & positive_diagonal
开发者ID:RichieMay,项目名称:pytorch,代码行数:8,代码来源:constraints.py


示例16: eye_

def eye_(tensor):
    r"""Overwrite a 2-dimensional `Tensor` in place with the identity matrix.

    Preserves the identity of the inputs in `Linear` layers, where as many
    inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional `torch.Tensor`

    Returns:
        The same tensor, filled in place.

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.eye_(w)
    """
    if tensor.dim() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    rows, cols = tensor.shape
    # Fill in place without recording the operation on the autograd tape.
    with torch.no_grad():
        torch.eye(rows, cols, out=tensor, requires_grad=tensor.requires_grad)
    return tensor
开发者ID:xiongyw,项目名称:pytorch,代码行数:18,代码来源:init.py


示例17: addOrthoRegularizer

def addOrthoRegularizer(loss, model, regParam, targetLayers):
    """Add a soft orthogonality penalty on the weights of selected layers.

    For each non-bias parameter W of each target layer, adds
    ``0.5 * regParam * sum(|W.W^T - I|)`` to ``loss``, pushing the rows of W
    towards orthonormality.

    :param loss: scalar loss; accumulated into in place (via ``+=``) when it
        is a tensor, and also returned for callers preferring the
        functional style.
    :param model: indexable container of modules (e.g. ``nn.Sequential``).
    :param regParam: regularization strength.
    :param targetLayers: indices into ``model`` of the layers to regularize.
    :returns: the accumulated loss.
    """
    for layer_index in targetLayers:
        for name, param in model[layer_index].named_parameters():
            if 'bias' in name:  # dont regularize bias params
                continue
            W = param.t()
            WTW = torch.mm(W.t(), W)
            # Build the identity with the same dtype/device as the weights so
            # the penalty also works for cuda / double-precision models.
            eye = torch.eye(WTW.shape[0], dtype=WTW.dtype, device=WTW.device)
            C = (regParam * 0.5) * torch.sum(torch.abs(WTW - eye))
            loss += C
    # BUGFIX: the original returned nothing, so the accumulated penalty was
    # silently lost whenever ``loss`` did not support true in-place ``+=``
    # (e.g. a plain python float).
    return loss
开发者ID:mkelcb,项目名称:knet,代码行数:9,代码来源:knet_main_pytorch.py


示例18: eye

def eye(tensor):
    """In-place identity initialization for a 2-D Tensor or Variable.

    Preserves the identity of the inputs in Linear layers, where as many
    inputs are preserved as possible.

    Args:
        tensor: a 2-dimensional torch.Tensor or autograd.Variable

    Returns:
        The input tensor, filled in place with the identity matrix.

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.eye(w)
    """
    if tensor.dim() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")

    n_rows, n_cols = tensor.shape
    # Fill in place outside the autograd tape.
    with torch.no_grad():
        torch.eye(n_rows, n_cols, out=tensor)
    return tensor
开发者ID:Jsmilemsj,项目名称:pytorch,代码行数:18,代码来源:init.py


示例19: test_forward

def test_forward(model_class, X, y, kernel, likelihood):
    """Shape and consistency checks for the forward pass of each GP model.

    Exercises four scenarios: prediction shapes with full vs. diagonal
    covariance, the trivial case Xnew == X, repeated identical query points,
    and a pure white-noise kernel.
    """
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        # Sparse models additionally take inducing inputs; reuse X for them.
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)

    # test shape
    Xnew = torch.tensor([[2.0, 3.0, 1.0]])
    loc0, cov0 = gp(Xnew, full_cov=True)
    loc1, var1 = gp(Xnew, full_cov=False)
    assert loc0.dim() == y.dim()
    assert loc0.shape[-1] == Xnew.shape[0]
    # test latent shape
    assert loc0.shape[:-1] == y.shape[:-1]
    assert cov0.shape[:-2] == y.shape[:-1]
    assert cov0.shape[-1] == cov0.shape[-2]
    assert cov0.shape[-1] == Xnew.shape[0]
    assert_equal(loc0, loc1)
    n = Xnew.shape[0]
    # The diagonal of the full covariance must match the marginal variances.
    cov0_diag = torch.stack([mat.diag() for mat in cov0.view(-1, n, n)]).reshape(var1.shape)
    assert_equal(cov0_diag, var1)

    # test trivial forward: Xnew = X
    loc, cov = gp(X, full_cov=True)
    if model_class is VariationalGP or model_class is VariationalSparseGP:
        # Variational models start from an uninformative posterior at X.
        assert_equal(loc.norm().item(), 0)
        assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape))
    else:
        # Regression models interpolate the training targets exactly.
        assert_equal(loc, y)
        assert_equal(cov.norm().item(), 0)

    # test same input forward: Xnew[0,:] = Xnew[1,:] = ...
    Xnew = torch.tensor([[2.0, 3.0, 1.0]]).expand(10, 3)
    loc, cov = gp(Xnew, full_cov=True)
    loc_diff = loc - loc[..., :1].expand(y.shape[:-1] + (10,))
    assert_equal(loc_diff.norm().item(), 0)
    cov_diff = cov - cov[..., :1, :1].expand(y.shape[:-1] + (10, 10))
    assert_equal(cov_diff.norm().item(), 0)

    # test noise kernel forward: kernel = WhiteNoise
    gp.kernel = WhiteNoise(input_dim=3, variance=torch.tensor(10.))
    loc, cov = gp(X, full_cov=True)
    assert_equal(loc.norm().item(), 0)
    assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape) * 10)
开发者ID:lewisKit,项目名称:pyro,代码行数:44,代码来源:test_models.py


示例20: reset_parameters

    def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """
        # Input-to-hidden weights: orthogonal initialization.
        init.orthogonal(self.weight_ih.data)
        init.orthogonal(self.alpha_weight_ih.data)

        # Hidden-to-hidden weights: identity matrix, tiled once per gate.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 3)
        self.weight_hh.data.set_(weight_hh_data)

        # Alpha hidden-to-hidden weights: a single identity block.
        # NOTE(review): `.repeat(1, 1)` is a no-op — presumably kept for
        # symmetry with weight_hh above; confirm intended.
        alpha_weight_hh_data = torch.eye(self.hidden_size)
        alpha_weight_hh_data = alpha_weight_hh_data.repeat(1, 1)
        self.alpha_weight_hh.data.set_(alpha_weight_hh_data)

        # The bias is just set to zero vectors.
        if self.use_bias:
            init.constant(self.bias.data, val=0)
            init.constant(self.alpha_bias.data, val=0)
开发者ID:chongp,项目名称:Name-Entity-Recognition,代码行数:19,代码来源:latticelstm.py



注:本文中的torch.eye函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python torch.from_numpy函数代码示例发布时间:2022-05-27
下一篇:
Python torch.exp函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap