
Python torch.pow Function Code Examples


This article collects typical code examples of the torch.pow function in Python. If you have been wondering exactly what torch.pow does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty pow code examples are shown below, ordered by popularity.
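Before the examples, a minimal sketch (our own illustration, not taken from any of the projects below) of what torch.pow itself does: it raises a tensor elementwise to a power, where the exponent can be a scalar or a tensor of per-element exponents, and an out= tensor can receive the result. All three forms appear in the examples that follow.

import torch

x = torch.tensor([1.0, 2.0, 3.0])
print(torch.pow(x, 2))         # tensor([1., 4., 9.]) -- same as x ** 2
print(torch.pow(x, -0.5))      # elementwise 1 / sqrt(x)
print(torch.pow(x, torch.tensor([3.0, 2.0, 1.0])))  # per-element exponents

out = torch.empty(3)
torch.pow(x, 0.5, out=out)     # writes sqrt(x) into a preallocated tensor
print(out)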

Example 1: pairwise_distance

def pairwise_distance(x1, x2, p=2, eps=1e-6):
    r"""
    Computes the batchwise pairwise distance between vectors x1 and x2:

    .. math::
        \Vert x \Vert _p := \left( \sum_{i=1}^n  \vert x_i \vert ^ p \right) ^ {1/p}

    Args:
        x1: first input tensor
        x2: second input tensor
        p: the norm degree. Default: 2
        eps (float, optional): Small value to avoid division by zero. Default: 1e-6

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.pairwise_distance(input1, input2, p=2)
        >>> output.backward()
    """
    assert x1.size() == x2.size(), "Input sizes must be equal."
    assert x1.dim() == 2, "Input must be a 2D matrix."
    diff = torch.abs(x1 - x2)
    out = torch.pow(diff + eps, p).sum(dim=1, keepdim=True)
    return torch.pow(out, 1. / p)
Developer: athiwatp, Project: pytorch, Lines: 29, Source: functional.py
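As a quick sanity check of Example 1 (a sketch of ours, not from the original repo): because eps is tiny, the result is close to the row-wise p-norm of x1 - x2, and on PyTorch 0.4+ plain tensors can be passed in place of the autograd.Variable wrapper shown in the docstring.

import torch

x1, x2 = torch.randn(100, 128), torch.randn(100, 128)
d = pairwise_distance(x1, x2, p=2)                # shape (100, 1)
ref = (x1 - x2).norm(p=2, dim=1, keepdim=True)    # row-wise 2-norm
print(torch.allclose(d, ref, atol=1e-3))          # True, up to the eps offset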


Example 2: forward

    def forward(self, x, labels):
        """
        Args:
        - x: feature matrix with shape (batch_size, feat_dim).
        - labels: ground truth labels with shape (batch_size).
        """
        batch_size = x.size(0)
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(1, -2, x, self.centers.t())  # distmat = distmat - 2 * x @ centers.T (legacy positional beta/alpha form)

        classes = torch.arange(self.num_classes).long()
        if self.use_gpu: classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))

        dist = []
        for i in range(batch_size):
            value = distmat[i][mask[i]]
            value = value.clamp(min=1e-12, max=1e+12) # for numerical stability
            dist.append(value)
        dist = torch.cat(dist)
        loss = dist.mean()

        return loss
Developer: zysolanine, Project: deep-person-reid, Lines: 25, Source: losses.py
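The distmat construction in Example 2 relies on the identity ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2 * x_i . c_j. A small standalone check of that identity against torch.cdist (our own sketch, with made-up shapes):

import torch

x = torch.randn(8, 16)        # batch of features
centers = torch.randn(4, 16)  # one center per class
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(8, 4) \
        + torch.pow(centers, 2).sum(dim=1, keepdim=True).expand(4, 8).t()
distmat = distmat - 2 * x @ centers.t()   # same effect as the legacy addmm_ call
print(torch.allclose(distmat, torch.cdist(x, centers).pow(2), atol=1e-4))  # True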


Example 3: forward

    def forward(self, model_output, target, mask, attr):

        pred_seq, pred_attr = model_output

        # input (from model.forward())      (batch_size, max_seq_len, vocab_size)
        # target (from dataloader->labels)  (batch_size, max_seq_len)
        # mask (from dataloader->masks)     (batch_size, max_seq_len)

        if not self.seen:
            print('> in LanguageModelCriterion.forward(input, target, mask):')
            print('    pred_seq', pred_seq.shape)  # (200, 17, 3562)
            print('    pred_attr', pred_attr.shape)  # (200, 1000)
            print('    target', target.shape)  # (200, 17)
            print('    mask', mask.shape)  # (200, 17)
            print('    attr', attr.shape)  # (200, 1000)
            self.seen = True

        # truncate to the same size
        target = target[:, :pred_seq.size(1)]
        mask = mask[:, :pred_seq.size(1)]
        pred_seq = to_contiguous(pred_seq).view(-1, pred_seq.size(2))
        target = to_contiguous(target).view(-1, 1)
        mask = to_contiguous(mask).view(-1, 1)
        output = - pred_seq.gather(1, target) * mask
        output = torch.sum(output) / torch.sum(mask)

        bsize = pred_attr.size(0)
        pred_attr = to_contiguous(pred_attr)
        attr = to_contiguous(attr.float())
        attr_loss = torch.pow(torch.sum(torch.pow((pred_attr - attr), 2)), 0.5) / bsize

        output = output + self.attr_weight * attr_loss

        return output
Developer: nagizeroiw, Project: ImageCaptioning.pytorch, Lines: 34, Source: utils.py
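A side note on Example 3 (our own check, not from the repo): the attr_loss expression is simply an L2 norm spelled out with torch.pow, so it matches torch.norm up to rounding:

import torch

pred_attr, attr = torch.randn(200, 1000), torch.rand(200, 1000)
d = pred_attr - attr
manual = torch.pow(torch.sum(torch.pow(d, 2)), 0.5) / d.size(0)
print(torch.allclose(manual, torch.norm(d) / d.size(0)))  # True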


Example 4: model

 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal,
                             self.mu0, torch.pow(self.tau0, -0.5))
     sigma = torch.pow(self.tau, -0.5)
     pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent, sigma)
     pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent, sigma)
     return mu_latent
Developer: Magica-Chen, Project: pyro, Lines: 7, Source: test_tracegraph_elbo.py
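Example 4 and several of the later Pyro examples use torch.pow(tau, -0.5) to turn a precision tau into a standard deviation. A minimal illustration of the idiom (our own):

import torch

tau = torch.tensor([0.25, 1.0, 4.0])   # precisions
sigma = torch.pow(tau, -0.5)           # std = 1 / sqrt(precision)
print(sigma)                           # tensor([2.0000, 1.0000, 0.5000])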


Example 5: updateOutput

    def updateOutput(self, input):
        assert input.dim() == 4

        if self.scale is None:
            self.scale = input.new()
        if input.type() == 'torch.cuda.FloatTensor':
            self._backend.SpatialCrossMapLRN_updateOutput(
                self._backend.library_state,
                input,
                self.output,
                self.scale,
                self.size,
                self.alpha,
                self.beta,
                self.k
            )
        else:
            batchSize = input.size(0)
            channels = input.size(1)
            inputHeight = input.size(2)
            inputWidth = input.size(3)

            self.output.resize_as_(input)
            self.scale.resize_as_(input)

            # use output storage as temporary buffer
            inputSquare = self.output
            torch.pow(input, 2, out=inputSquare)

            prePad = int((self.size - 1) / 2 + 1)
            prePadCrop = channels if prePad > channels else prePad

            scaleFirst = self.scale.select(1, 0)
            scaleFirst.zero_()
            # compute first feature map normalization
            for c in range(prePadCrop):
                scaleFirst.add_(inputSquare.select(1, c))

            # reuse computations for next feature maps normalization
            # by adding the next feature map and removing the previous
            for c in range(1, channels):
                scalePrevious = self.scale.select(1, c - 1)
                scaleCurrent = self.scale.select(1, c)
                scaleCurrent.copy_(scalePrevious)
                if c < channels - prePad + 1:
                    squareNext = inputSquare.select(1, c + prePad - 1)
                    scaleCurrent.add_(1, squareNext)

                if c > prePad:
                    squarePrevious = inputSquare.select(1, c - prePad)
                    scaleCurrent.add_(-1, squarePrevious)

            self.scale.mul_(self.alpha / self.size).add_(self.k)

            torch.pow(self.scale, -self.beta, out=self.output)
            self.output.mul_(input)

        return self.output
Developer: Jsmilemsj, Project: pytorch, Lines: 58, Source: SpatialCrossMapLRN.py


Example 6: model

 def model():
     mu_latent = pyro.sample("mu_latent", dist.normal,
                             self.mu0, torch.pow(self.lam0, -0.5))
     pyro.map_data("aaa", self.data, lambda i,
                   x: pyro.observe(
                       "obs_%d" % i, dist.normal,
                       x, mu_latent, torch.pow(self.lam, -0.5)),
                   batch_size=self.batch_size)
     return mu_latent
Developer: Magica-Chen, Project: pyro, Lines: 9, Source: test_inference.py


Example 7: mean_dist

def mean_dist(source_points, warped_points, L_pck):
    # compute the mean keypoint distance, normalized per image by L_pck
    batch_size = source_points.size(0)
    dist = torch.zeros(batch_size)
    for i in range(batch_size):
        p_src = source_points[i, :]
        p_wrp = warped_points[i, :]
        N_pts = torch.sum(torch.ne(p_src[0, :], -1) * torch.ne(p_src[1, :], -1))
        point_distance = torch.pow(torch.sum(torch.pow(p_src[:, :N_pts] - p_wrp[:, :N_pts], 2), 0), 0.5)
        L_pck_mat = L_pck[i].expand_as(point_distance)
        dist[i] = torch.mean(torch.div(point_distance, L_pck_mat))
    return dist
Developer: codealphago, Project: weakalign, Lines: 12, Source: eval_util.py
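The torch.pow(..., 0.5) over a summed torch.pow(..., 2) in Example 7 is a hand-rolled per-keypoint Euclidean distance; a quick equivalence check against torch.norm (our own sketch):

import torch

p_src, p_wrp = torch.randn(2, 10), torch.randn(2, 10)  # (x, y) rows, 10 keypoints
manual = torch.pow(torch.sum(torch.pow(p_src - p_wrp, 2), 0), 0.5)
print(torch.allclose(manual, torch.norm(p_src - p_wrp, dim=0)))  # True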


Example 8: model

        def model(*args, **kwargs):
            next_mean = self.mu0
            for k in range(1, self.N + 1):
                latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
                mu_latent = pyro.sample("mu_latent_%d" % k, latent_dist)
                next_mean = mu_latent

            mu_N = next_mean
            for i, x in enumerate(self.data):
                pyro.observe("obs_%d" % i, dist.normal, x, mu_N,
                             torch.pow(self.lambdas[self.N], -0.5))
            return mu_N
Developer: Magica-Chen, Project: pyro, Lines: 12, Source: test_conjugate_gaussian_models.py


Example 9: model

    def model(self, reparameterized, difficulty=0.0):
        next_mean = self.loc0
        for k in range(1, self.N + 1):
            latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
            loc_latent = pyro.sample("loc_latent_%d" % k, latent_dist)
            next_mean = loc_latent

        loc_N = next_mean
        with pyro.iarange("data", self.data.size(0)):
            pyro.sample("obs", dist.Normal(loc_N.expand_as(self.data),
                                           torch.pow(self.lambdas[self.N], -0.5).expand_as(self.data)), obs=self.data)
        return loc_N
Developer: lewisKit, Project: pyro, Lines: 12, Source: test_conjugate_gaussian_models.py


Example 10: log_norm

def log_norm(x, mu, std):
    """Compute the log pdf of x,
    under a normal distribution with mean mu and standard deviation std."""
    
#    print ("X device: ", x.device)
#    print ("mu device: ", mu.device)
#    print ("std device: ", std.device)
    x = x.view(-1)
    mu = mu.view(-1)
    std = std.view(-1)
    return -0.5 * torch.log(2*np.pi*torch.pow(std,2))  \
            - 0.5 * (1/torch.pow(std,2))* torch.pow( (x-mu),2) 
Developer: manuwhs, Project: Trapyng, Lines: 12, Source: Variational_inferences_lib.py
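A quick cross-check of log_norm against PyTorch's built-in Normal distribution (our own sketch; it assumes log_norm and its numpy import are in scope, and the two formulas agree to floating-point precision):

import torch
import numpy as np  # log_norm uses np.pi

x, mu, std = torch.randn(5), torch.zeros(5), 0.7 * torch.ones(5)
manual = log_norm(x, mu, std)
builtin = torch.distributions.Normal(mu, std).log_prob(x)
print(torch.allclose(manual, builtin, atol=1e-5))  # True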


Example 11: pck

def pck(source_points, warped_points, L_pck, alpha=0.1):
    # compute the percentage of correct keypoints (PCK)
    batch_size = source_points.size(0)
    pck = torch.zeros(batch_size)
    for i in range(batch_size):
        p_src = source_points[i, :]
        p_wrp = warped_points[i, :]
        N_pts = torch.sum(torch.ne(p_src[0, :], -1) * torch.ne(p_src[1, :], -1))
        point_distance = torch.pow(torch.sum(torch.pow(p_src[:, :N_pts] - p_wrp[:, :N_pts], 2), 0), 0.5)
        L_pck_mat = L_pck[i].expand_as(point_distance)
        correct_points = torch.le(point_distance, L_pck_mat * alpha)
        pck[i] = torch.mean(correct_points.float())
    return pck
Developer: codealphago, Project: weakalign, Lines: 13, Source: eval_util.py


Example 12: euclidean_dist

def euclidean_dist(x, y):
  """
  Args:
    x: pytorch Variable, with shape [m, d]
    y: pytorch Variable, with shape [n, d]
  Returns:
    dist: pytorch Variable, with shape [m, n]
  """
  m, n = x.size(0), y.size(0)
  xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
  yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
  dist = xx + yy
  dist.addmm_(1, -2, x, y.t())  # dist = dist - 2 * x @ y.T (legacy positional beta/alpha form)
  dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
  return dist
Developer: ChunfeiMa, Project: AlignedReID-Re-Production-Pytorch, Lines: 15, Source: loss.py
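Example 12 uses the same expand/addmm trick as Example 2. A standalone check against torch.cdist (our own sketch; note that recent PyTorch deprecates the positional addmm_(1, -2, ...) form, so the keyword beta/alpha spelling is used here):

import torch

x, y = torch.randn(6, 32), torch.randn(9, 32)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(6, 9)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(9, 6).t()
dist = (xx + yy).addmm_(x, y.t(), beta=1, alpha=-2).clamp(min=1e-12).sqrt()
print(torch.allclose(dist, torch.cdist(x, y), atol=1e-4))  # True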


Example 13: backward

    def backward(self, grad_output):
        input, output = self.saved_tensors
        grad_input = grad_output.new()

        if self._backend is not None:
            self._backend.SpatialCrossMapLRN_updateGradInput(
                self._backend.library_state,
                input,
                grad_output,
                grad_input,
                self.scale,
                output,
                self.size,
                self.alpha,
                self.beta,
                self.k
            )
        else:
            batch_size = input.size(0)
            channels = input.size(1)
            input_height = input.size(2)
            input_width = input.size(3)

            padded_ratio = input.new(channels + self.size - 1, input_height, input_width)
            accum_ratio = input.new(input_height, input_width)

            cache_ratio_value = 2 * self.alpha * self.beta / self.size
            inversePrePad = int(self.size - (self.size - 1) / 2)

            grad_input.resize_as_(input)
            torch.pow(self.scale, -self.beta, out=grad_input).mul_(grad_output)

            padded_ratio.zero_()
            padded_ratio_center = padded_ratio.narrow(0, inversePrePad, channels)
            for n in range(batch_size):
                torch.mul(grad_output[n], output[n], out=padded_ratio_center)
                padded_ratio_center.div_(self.scale[n])
                torch.sum(
                    padded_ratio.narrow(0, 0, self.size - 1), 0, keepdim=False, out=accum_ratio)
                for c in range(channels):
                    accum_ratio.add_(padded_ratio[c + self.size - 1])
                    grad_input[n][c].addcmul_(-cache_ratio_value, input[n][c],
                                              accum_ratio)
                    accum_ratio.add_(-1, padded_ratio[c])

        return grad_input
Developer: Jsmilemsj, Project: pytorch, Lines: 48, Source: normalization.py


Example 14: test_save_and_load

    def test_save_and_load(self):
        lin = pyro.module("mymodule", self.linear_module)
        pyro.module("mymodule2", self.linear_module2)
        x = torch.randn(1, 3)
        myparam = pyro.param("myparam", torch.tensor(1.234 * torch.ones(1), requires_grad=True))

        cost = torch.sum(torch.pow(lin(x), 2.0)) * torch.pow(myparam, 4.0)
        cost.backward()
        params = list(self.linear_module.parameters()) + [myparam]
        optim = torch.optim.Adam(params, lr=.01)
        myparam_copy_stale = copy(pyro.param("myparam").detach().cpu().numpy())

        optim.step()

        myparam_copy = copy(pyro.param("myparam").detach().cpu().numpy())
        param_store_params = copy(pyro.get_param_store()._params)
        param_store_param_to_name = copy(pyro.get_param_store()._param_to_name)
        assert len(list(param_store_params.keys())) == 5
        assert len(list(param_store_param_to_name.values())) == 5

        pyro.get_param_store().save('paramstore.unittest.out')
        pyro.clear_param_store()
        assert len(list(pyro.get_param_store()._params)) == 0
        assert len(list(pyro.get_param_store()._param_to_name)) == 0
        pyro.get_param_store().load('paramstore.unittest.out')

        def modules_are_equal():
            weights_equal = np.sum(np.fabs(self.linear_module3.weight.detach().cpu().numpy() -
                                   self.linear_module.weight.detach().cpu().numpy())) == 0.0
            bias_equal = np.sum(np.fabs(self.linear_module3.bias.detach().cpu().numpy() -
                                self.linear_module.bias.detach().cpu().numpy())) == 0.0
            return (weights_equal and bias_equal)

        assert not modules_are_equal()
        pyro.module("mymodule", self.linear_module3, update_module_params=False)
        assert id(self.linear_module3.weight) != id(pyro.param('mymodule$$$weight'))
        assert not modules_are_equal()
        pyro.module("mymodule", self.linear_module3, update_module_params=True)
        assert id(self.linear_module3.weight) == id(pyro.param('mymodule$$$weight'))
        assert modules_are_equal()

        myparam = pyro.param("myparam")
        store = pyro.get_param_store()
        assert myparam_copy_stale != myparam.detach().cpu().numpy()
        assert myparam_copy == myparam.detach().cpu().numpy()
        assert sorted(param_store_params.keys()) == sorted(store._params.keys())
        assert sorted(param_store_param_to_name.values()) == sorted(store._param_to_name.values())
        assert sorted(store._params.keys()) == sorted(store._param_to_name.values())
Developer: lewisKit, Project: pyro, Lines: 48, Source: test_param.py


Example 15: forward

    def forward(self, input, label):
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        if self.device_id is None:
            cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        else:
            x = input
            sub_weights = torch.chunk(self.weight, len(self.device_id), dim=0)
            temp_x = x.cuda(self.device_id[0])
            weight = sub_weights[0].cuda(self.device_id[0])
            cosine = F.linear(F.normalize(temp_x), F.normalize(weight))
            for i in range(1, len(self.device_id)):
                temp_x = x.cuda(self.device_id[i])
                weight = sub_weights[i].cuda(self.device_id[i])
                cosine = torch.cat((cosine, F.linear(F.normalize(temp_x), F.normalize(weight)).cuda(self.device_id[0])), dim=1) 
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
        phi = cosine * self.cos_m - sine * self.sin_m
        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        # --------------------------- convert label to one-hot ---------------------------
        one_hot = torch.zeros(cosine.size())
        if self.device_id is not None:
            one_hot = one_hot.cuda(self.device_id[0])
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        # ------------- torch.where: out_i = x_i if condition_i else y_i -------------
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # you can use torch.where if your torch.__version__ is 0.4
        output *= self.s

        return output
Developer: stjordanis, Project: face.evoLVe.PyTorch, Lines: 30, Source: metrics.py


Example 16: kurtosis_score

def kurtosis_score(x, dim=0):
    '''Test whether a dataset has normal kurtosis.

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
    Adapted from `scipy.stats.kurtosistest`.

    Args:
        x: Tensor of the sample data.
        dim: Dimension along which to compute the test. Default is 0.
            If None, compute over the whole tensor `x`.
    Returns:
        statistic: The computed z-score for this test.
        p-value: A 2-sided chi squared probability for the hypothesis test.
    '''
    x, n, dim = _x_n_dim(x, dim)
    if n < 20:
        raise ValueError(
            "Number of elements has to be >= 20 to compute kurtosis")
    b2 = (x**4).mean(dim) / (x**2).mean(dim)**2
    E = 3.0 * (n - 1) / (n + 1)
    varb2 = 24.0 * n * (n - 2) * (n - 3) / ((n + 1)**2 * (n + 3) * (n + 5))
    x = (b2 - E) / math.sqrt(varb2)
    sqrtbeta1 = 6.0 * (n * n - 5 * n + 2) / ((n + 7) * (n + 9)) *\
        math.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))
    A = 6.0 + 8.0 / sqrtbeta1 * \
        (2.0 / sqrtbeta1 + math.sqrt(1 + 4.0 / (sqrtbeta1**2)))
    term1 = 1 - 2 / (9.0 * A)
    denom = 1 + x * math.sqrt(2 / (A - 4.0))
    term2 = torch.sign(denom) * torch.pow((1 - 2.0 / A) /
                                          torch.abs(denom), 1 / 3.0)
    Z = (term1 - term2) / math.sqrt(2 / (9.0 * A))
    return Z, 1 + torch.erf(-math.sqrt(0.5) * torch.abs(Z))
Developer: ModarTensai, Project: network_moments, Lines: 34, Source: stats.py
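kurtosis_score depends on an _x_n_dim helper that is not shown in this snippet. Assuming that helper is in scope and normalizes the input as the code above expects, a usage sketch cross-checking against scipy.stats.kurtosistest (which the docstring cites) might look like this; the statistics agree only roughly, since the torch version uses uncentered moments:

import torch
from scipy import stats

x = torch.randn(1000)
Z, p = kurtosis_score(x)                 # assumes _x_n_dim is available
stat, pval = stats.kurtosistest(x.numpy())
print(float(Z), stat)                    # roughly equal for zero-mean data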


Example 17: guide

 def guide():
     pyro.module("mymodule", pt_guide)
     mu_q, tau_q = torch.exp(pt_guide.mu_q_log), torch.exp(pt_guide.tau_q_log)
     sigma = torch.pow(tau_q, -0.5)
     pyro.sample("mu_latent",
                 dist.Normal(mu_q, sigma, reparameterized=reparameterized),
                 baseline=dict(use_decaying_avg_baseline=True))
Developer: Magica-Chen, Project: pyro, Lines: 7, Source: test_tracegraph_elbo.py


Example 18: test_regularization

    def test_regularization(self):
        penalty = self.model.get_regularization_penalty().data
        assert (penalty > 0).all()

        penalty2 = 0

        # Config specifies penalty as
        #   "regularizer": [
        #     ["weight$", {"type": "l2", "alpha": 10}],
        #     ["bias$", {"type": "l1", "alpha": 5}]
        #   ]
        for name, parameter in self.model.named_parameters():
            if name.endswith("weight"):
                weight_penalty = 10 * torch.sum(torch.pow(parameter, 2))
                penalty2 += weight_penalty
            elif name.endswith("bias"):
                bias_penalty = 5 * torch.sum(torch.abs(parameter))
                penalty2 += bias_penalty

        assert (penalty == penalty2.data).all()

        # You get a RuntimeError if you call `model.forward` twice on the same inputs.
        # The data and config are such that the whole dataset is one batch.
        training_batch = next(self.iterator(self.instances, num_epochs=1))
        validation_batch = next(self.iterator(self.instances, num_epochs=1))

        training_loss = self.trainer._batch_loss(training_batch, for_training=True).data
        validation_loss = self.trainer._batch_loss(validation_batch, for_training=False).data

        # Training loss should have the regularization penalty, but validation loss should not.
        assert (training_loss != validation_loss).all()

        # Training loss should equal the validation loss plus the penalty.
        penalized = validation_loss + penalty
        assert (training_loss == penalized).all()
Developer: pyknife, Project: allennlp, Lines: 35, Source: simple_tagger_test.py
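A minimal standalone version of the penalty reproduced in Example 18 (our own sketch, not AllenNLP's implementation), using the same l2-on-weights / l1-on-biases configuration:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
penalty = torch.tensor(0.0)
for name, parameter in model.named_parameters():
    if name.endswith("weight"):
        penalty = penalty + 10 * torch.sum(torch.pow(parameter, 2))  # l2, alpha=10
    elif name.endswith("bias"):
        penalty = penalty + 5 * torch.sum(torch.abs(parameter))      # l1, alpha=5
print(penalty)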


Example 19: singleTagLoss

def singleTagLoss(pred_tag, keypoints):
    """
    associative embedding loss for one image
    """
    eps = 1e-6
    tags = []
    pull = 0
    for i in keypoints:
        tmp = []
        for j in i:
            if j[1]>0:
                tmp.append(pred_tag[j[0]])
        if len(tmp) == 0:
            continue
        tmp = torch.stack(tmp)
        tags.append(torch.mean(tmp, dim=0))
        pull = pull + torch.mean((tmp - tags[-1].expand_as(tmp))**2)

    if len(tags) == 0:
        return make_input(torch.zeros([1]).float()), make_input(torch.zeros([1]).float())

    tags = torch.stack(tags)[:,0]

    num = tags.size()[0]
    size = (num, num, tags.size()[1])
    A = tags.unsqueeze(dim=1).expand(*size)
    B = A.permute(1, 0, 2)

    diff = A - B
    diff = torch.pow(diff, 2).sum(dim=2)[:,:,0]
    push = torch.exp(-diff)
    push = (torch.sum(push) - num)
    return push/((num - 1) * num + eps) * 0.5, pull/(num + eps)
Developer: cuizy15, Project: pose-ae-train, Lines: 33, Source: loss.py


Example 20: print_gradients

 def print_gradients(self, X, Y):
     """
     Print the gradients between the output and X
     """
     print ("--------- GRADIENTS ------------")
     predictions = self.forward(X)
     
     ## Define the loss: 
     loss = torch.sum(torch.pow(predictions - Y, 2))
     
     ## Clean previous gradients 
     self.zero_grad()
     loss.backward()
     
     print (self.linear1.weight.grad)
     print (self.linear1.bias.grad)
     
     print (self.W2.grad)
     print (self.b2.grad)
     
     print ("----------- STRUCTURE ------------")
     ## Inspect the autograd graph behind the loss
     print(loss.grad_fn)                       # SumBackward (from torch.sum)
     print(loss.grad_fn.next_functions[0][0])  # PowBackward (from torch.pow)
     print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # SubBackward (predictions - Y)
 
 
     self.zero_grad()
Developer: manuwhs, Project: Trapyng, Lines: 28, Source: HalfBayesianMLP.py



Note: The torch.pow examples in this article were compiled by 纯净天空 from source code and documentation on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's license before redistributing or reusing the code. Please do not repost without permission.

