
Python torch.clamp Function Code Examples


This article collects typical usage examples of Python's torch.clamp function. If you have been wondering what torch.clamp does, how to call it, or what real-world uses look like, the curated code examples below should help.



Twenty code examples of the clamp function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
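Before the examples, a quick self-contained sketch of the basic behavior: torch.clamp limits every element of a tensor to the interval [min, max], and either bound may be omitted.

import torch

x = torch.tensor([-2.0, -0.5, 0.3, 1.7])

# Clamp to [0, 1]: values below 0 become 0, values above 1 become 1.
print(torch.clamp(x, min=0.0, max=1.0))  # tensor([0.0000, 0.0000, 0.3000, 1.0000])

# A single bound also works.
print(torch.clamp(x, min=0.0))  # tensor([0.0000, 0.0000, 0.3000, 1.7000])
print(torch.clamp(x, max=0.0))  # tensor([-2.0000, -0.5000, 0.0000, 0.0000])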

Example 1: bbox_iou

def bbox_iou(box1, box2, x1y1x2y2=True):
    """
    Returns the IoU of two bounding boxes
    """
    if not x1y1x2y2:
        # Transform from center and width to exact coordinates
        b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
        b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
        b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
    else:
        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # Get the coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
        inter_rect_y2 - inter_rect_y1 + 1, min=0
    )
    # Union Area
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

    iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)

    return iou
Author: cf904c27 | Project: PyTorch-YOLOv3 | Lines: 31 | Source: utils.py
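A minimal usage sketch for the function above, with two made-up boxes in (x1, y1, x2, y2) format (note the +1 terms treat coordinates as inclusive pixel indices):

import torch

box1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
box2 = torch.tensor([[5.0, 5.0, 15.0, 15.0]])

# Intersection is a 6x6 pixel patch (inclusive), each box covers 11x11 = 121,
# so IoU = 36 / (121 + 121 - 36) ~= 0.175
print(bbox_iou(box1, box2))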


Example 2: bbox_iou

def bbox_iou(box1, box2):
    """
    Returns the IoU of two bounding boxes 
    
    
    """
    #Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:,0], box1[:,1], box1[:,2], box1[:,3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:,0], box2[:,1], box2[:,2], box2[:,3]
    
    #get the corrdinates of the intersection rectangle
    inter_rect_x1 =  torch.max(b1_x1, b2_x1)
    inter_rect_y1 =  torch.max(b1_y1, b2_y1)
    inter_rect_x2 =  torch.min(b1_x2, b2_x2)
    inter_rect_y2 =  torch.min(b1_y2, b2_y2)
    
    #Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)

    #Union Area
    b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)
    
    iou = inter_area / (b1_area + b2_area - inter_area)
    
    return iou
Author: Yasuharaaa | Project: YOLO_v3_tutorial_from_scratch | Lines: 26 | Source: util.py


Example 3: F_bilinear_interp2d

def F_bilinear_interp2d(input, coords):
    """
    bilinear interpolation of 2d torch.autograd.Variable
    """
    x = torch.clamp(coords[:,:,0], 0, input.size(1)-2)
    x0 = x.floor()
    x1 = x0 + 1
    y = torch.clamp(coords[:,:,1], 0, input.size(2)-2)
    y0 = y.floor()
    y1 = y0 + 1

    stride = torch.LongTensor(input.stride())
    x0_ix = x0.mul(stride[1]).long()
    x1_ix = x1.mul(stride[1]).long()
    y0_ix = y0.mul(stride[2]).long()
    y1_ix = y1.mul(stride[2]).long()

    input_flat = input.view(input.size(0),-1).contiguous()

    vals_00 = input_flat.gather(1, x0_ix.add(y0_ix).detach())
    vals_10 = input_flat.gather(1, x1_ix.add(y0_ix).detach())
    vals_01 = input_flat.gather(1, x0_ix.add(y1_ix).detach())
    vals_11 = input_flat.gather(1, x1_ix.add(y1_ix).detach())
    
    xd = x - x0
    yd = y - y0
    xm = 1 - xd
    ym = 1 - yd

    x_mapped = (vals_00.mul(xm).mul(ym) +
                vals_10.mul(xd).mul(ym) +
                vals_01.mul(xm).mul(yd) +
                vals_11.mul(xd).mul(yd))

    return x_mapped.view_as(input)
Author: BrianDo2005 | Project: torchsample | Lines: 35 | Source: affine.py


Example 4: sample_from_discretized_mix_logistic_1d

def sample_from_discretized_mix_logistic_1d(l, nr_mix):
    # Pytorch ordering
    l = l.permute(0, 2, 3, 1)
    ls = [int(y) for y in l.size()]
    xs = ls[:-1] + [1] #[3]

    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2]) # for mean, scale

    # sample mixture indicator from softmax
    temp = torch.FloatTensor(logit_probs.size())
    if l.is_cuda: temp = temp.cuda()
    temp.uniform_(1e-5, 1. - 1e-5)
    temp = logit_probs.data - torch.log(- torch.log(temp))
    _, argmax = temp.max(dim=3)
   
    one_hot = to_one_hot(argmax, nr_mix)
    sel = one_hot.view(xs[:-1] + [1, nr_mix])
    # select logistic parameters
    means = torch.sum(l[:, :, :, :, :nr_mix] * sel, dim=4) 
    log_scales = torch.clamp(torch.sum(
        l[:, :, :, :, nr_mix:2 * nr_mix] * sel, dim=4), min=-7.)
    u = torch.FloatTensor(means.size())
    if l.is_cuda: u = u.cuda()
    u.uniform_(1e-5, 1. - 1e-5)
    u = Variable(u)
    x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
    x0 = torch.clamp(torch.clamp(x[:, :, :, 0], min=-1.), max=1.)
    out = x0.unsqueeze(1)
    return out
Author: insperatum | Project: vhe | Lines: 31 | Source: utils.py


Example 5: forward

    def forward(self, input) -> torch.FloatTensor:
        """ Preprocess the input matrix
        :param input tensor
        """
        if isinstance(input, np.ndarray):
            input = torch.from_numpy(input).type(self.dtype)
        if isinstance(input, rlt.FeatureVector):
            input = input.float_features.type(self.dtype)

        # ONNX doesn't support != yet
        not_missing_input = (
            self.one_tensor.float() - (input == self.missing_tensor).float()
        )
        feature_starts = self._get_type_boundaries()

        outputs = []
        for i, feature_type in enumerate(FEATURE_TYPES):
            begin_index = feature_starts[i]
            if (i + 1) == len(FEATURE_TYPES):
                end_index = len(self.normalization_parameters)
            else:
                end_index = feature_starts[i + 1]
            if begin_index == end_index:
                continue  # No features of this type
            if feature_type == ENUM:
                # Process one-at-a-time
                for j in range(begin_index, end_index):
                    norm_params = self.normalization_parameters[self.sorted_features[j]]
                    new_output = self._preprocess_feature_single_column(
                        j, input[:, j : j + 1], norm_params
                    )
                    new_output *= not_missing_input[:, j : j + 1]
                    self._check_preprocessing_output(new_output, [norm_params])
                    outputs.append(new_output)
            else:
                norm_params = []
                for f in self.sorted_features[begin_index:end_index]:
                    norm_params.append(self.normalization_parameters[f])
                new_output = self._preprocess_feature_multi_column(
                    begin_index, input[:, begin_index:end_index], norm_params
                )
                new_output *= not_missing_input[:, begin_index:end_index]
                self._check_preprocessing_output(new_output, norm_params)
                outputs.append(new_output)

        def wrap(output):
            if self.typed_output:
                return rlt.FeatureVector(float_features=output)
            else:
                return output

        if len(outputs) == 1:
            return wrap(torch.clamp(outputs[0], MIN_FEATURE_VALUE, MAX_FEATURE_VALUE))

        return wrap(
            torch.clamp(torch.cat(outputs, dim=1), MIN_FEATURE_VALUE, MAX_FEATURE_VALUE)
        )
Author: sra4077 | Project: Horizon | Lines: 57 | Source: preprocessor.py


Example 6: train_actor_critic

def train_actor_critic(actor, critic, memory, actor_optim, critic_optim, args):
    memory = np.array(memory) 
    states = np.vstack(memory[:, 0]) 
    actions = list(memory[:, 1]) 
    rewards = list(memory[:, 2]) 
    masks = list(memory[:, 3]) 

    old_values = critic(torch.Tensor(states))
    returns, advants = get_gae(rewards, masks, old_values, args)
    
    mu, std = actor(torch.Tensor(states))
    old_policy = log_prob_density(torch.Tensor(actions), mu, std)

    criterion = torch.nn.MSELoss()
    n = len(states)
    arr = np.arange(n)

    for _ in range(args.ppo_update_num):
        np.random.shuffle(arr)

        for i in range(n // args.batch_size): 
            batch_index = arr[args.batch_size * i : args.batch_size * (i + 1)]
            batch_index = torch.LongTensor(batch_index)
            
            inputs = torch.Tensor(states)[batch_index]
            actions_samples = torch.Tensor(actions)[batch_index]
            returns_samples = returns.unsqueeze(1)[batch_index]
            advants_samples = advants.unsqueeze(1)[batch_index]
            oldvalue_samples = old_values[batch_index].detach()
            
            values = critic(inputs)
            clipped_values = oldvalue_samples + \
                             torch.clamp(values - oldvalue_samples,
                                         -args.clip_param, 
                                         args.clip_param)
            critic_loss1 = criterion(clipped_values, returns_samples)
            critic_loss2 = criterion(values, returns_samples)
            critic_loss = torch.max(critic_loss1, critic_loss2).mean()

            loss, ratio, entropy = surrogate_loss(actor, advants_samples, inputs,
                                         old_policy.detach(), actions_samples,
                                         batch_index)
            clipped_ratio = torch.clamp(ratio,
                                        1.0 - args.clip_param,
                                        1.0 + args.clip_param)
            clipped_loss = clipped_ratio * advants_samples
            actor_loss = -torch.min(loss, clipped_loss).mean()

            loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy

            critic_optim.zero_grad()
            loss.backward(retain_graph=True) 
            critic_optim.step()

            actor_optim.zero_grad()
            loss.backward()
            actor_optim.step()
Author: lanseyege | Project: lets-do-irl | Lines: 57 | Source: train_model.py
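The two torch.clamp calls above are the heart of PPO: one clips the value estimate around its old value, the other clips the policy ratio to [1 - clip_param, 1 + clip_param]. A stripped-down sketch of the ratio clipping alone (all values made up, not from the repository):

import torch

clip_param = 0.2
ratio = torch.tensor([0.5, 0.9, 1.0, 1.3, 2.0])        # pi_new / pi_old
advantages = torch.tensor([1.0, -1.0, 0.5, 2.0, -0.5])

clipped_ratio = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
surrogate = torch.min(ratio * advantages, clipped_ratio * advantages)
actor_loss = -surrogate.mean()
print(actor_loss)  # the clip caps how much a single update can move the policy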


Example 7: forward

    def forward(self, x=None, warmup=1., inf_net=None): #, k=1): #, marginf_type=0):

        outputs = {}
        B = x.shape[0]

        if inf_net is None:
            # mu, logvar = self.inference_net(x)
            z, logits = self.q.sample(x) 
        else:
            # mu, logvar = inf_net.inference_net(x)   
            z, logqz = inf_net.sample(x) 

        # print (z[0])
        # b = harden(z)
        # print (b[0])
        
        # logpz = torch.sum( self.prior.log_prob(b), dim=1)

        # print (logpz[0])
        # print (logpz.shape)

        probs_q = torch.sigmoid(logits)
        probs_q = torch.clamp(probs_q, min=.00000001, max=.9999999)
        probs_p = torch.ones(B, self.z_size).cuda() *.5
        KL = probs_q*torch.log(probs_q/probs_p) + (1-probs_q)*torch.log((1-probs_q)/(1-probs_p))
        KL = torch.sum(KL, dim=1)

        # print (z.shape)
        # Decode Image
        x_hat = self.generator.forward(z)
        alpha = torch.sigmoid(x_hat)
        beta = Beta(alpha*self.beta_scale, (1.-alpha)*self.beta_scale)
        x_noise = torch.clamp(x + torch.FloatTensor(x.shape).uniform_(0., 1./256.).cuda(), min=1e-5, max=1-1e-5)
        logpx = beta.log_prob(x_noise) #[120,3,112,112]  # add uniform noise here

        logpx = torch.sum(logpx.view(B, -1),1) # [PB]  * self.w_logpx

        # print (logpx.shape,logpz.shape,logqz.shape)

        log_ws = logpx - KL #+ logpz - logqz

        outputs['logpx'] = torch.mean(logpx)
        outputs['x_recon'] = alpha
        # outputs['welbo'] = torch.mean(logpx + warmup*( logpz - logqz))
        outputs['welbo'] = torch.mean(logpx + warmup*(KL))
        outputs['elbo'] = torch.mean(log_ws)
        outputs['logws'] = log_ws
        outputs['z'] = z
        outputs['logpz'] = torch.zeros(1) #torch.mean(logpz)
        outputs['logqz'] = torch.mean(KL)
        # outputs['logvar'] = logvar

        return outputs
Author: chriscremer | Project: Other_Code | Lines: 55 | Source: vae_discrete.py
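The KL line above is the closed-form KL divergence between Bernoulli(q) and Bernoulli(p); the clamp keeps q strictly inside (0, 1) so that neither logarithm hits log(0). A small standalone check (probabilities made up):

import torch

p = 0.5
q = torch.tensor([0.0, 0.5, 1.0])             # raw probabilities
q = torch.clamp(q, min=1e-8, max=1. - 1e-8)   # avoid log(0) at the endpoints
kl = q * torch.log(q / p) + (1 - q) * torch.log((1 - q) / (1 - p))
print(kl)  # ~[0.693, 0.000, 0.693]: log(2) at the endpoints, 0 when q == p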


Example 8: F_trilinear_interp3d

def F_trilinear_interp3d(input, coords):
    """
    trilinear interpolation of 3D image
    """
    # take clamp then floor/ceil of x coords
    x = torch.clamp(coords[:,0], 0, input.size(1)-2)
    x0 = x.floor()
    x1 = x0 + 1
    # take clamp then floor/ceil of y coords
    y = torch.clamp(coords[:,1], 0, input.size(2)-2)
    y0 = y.floor()
    y1 = y0 + 1
    # take clamp then floor/ceil of z coords
    z = torch.clamp(coords[:,2], 0, input.size(3)-2)
    z0 = z.floor()
    z1 = z0 + 1

    stride = torch.LongTensor(input.stride())[1:]
    x0_ix = x0.mul(stride[0]).long()
    x1_ix = x1.mul(stride[0]).long()
    y0_ix = y0.mul(stride[1]).long()
    y1_ix = y1.mul(stride[1]).long()
    z0_ix = z0.mul(stride[2]).long()
    z1_ix = z1.mul(stride[2]).long()

    input_flat = th_flatten(input)

    vals_000 = input_flat[x0_ix.add(y0_ix).add(z0_ix).detach()]
    vals_100 = input_flat[x1_ix.add(y0_ix).add(z0_ix).detach()]
    vals_010 = input_flat[x0_ix.add(y1_ix).add(z0_ix).detach()]
    vals_001 = input_flat[x0_ix.add(y0_ix).add(z1_ix).detach()]
    vals_101 = input_flat[x1_ix.add(y0_ix).add(z1_ix).detach()]
    vals_011 = input_flat[x0_ix.add(y1_ix).add(z1_ix).detach()]
    vals_110 = input_flat[x1_ix.add(y1_ix).add(z0_ix).detach()]
    vals_111 = input_flat[x1_ix.add(y1_ix).add(z1_ix).detach()]

    xd = x - x0
    yd = y - y0
    zd = z - z0
    xm = 1 - xd
    ym = 1 - yd
    zm = 1 - zd

    x_mapped = (vals_000.mul(xm).mul(ym).mul(zm) +
                vals_100.mul(xd).mul(ym).mul(zm) +
                vals_010.mul(xm).mul(yd).mul(zm) +
                vals_001.mul(xm).mul(ym).mul(zd) +
                vals_101.mul(xd).mul(ym).mul(zd) +
                vals_011.mul(xm).mul(yd).mul(zd) +
                vals_110.mul(xd).mul(yd).mul(zm) +
                vals_111.mul(xd).mul(yd).mul(zd))

    return x_mapped.view_as(input)
Author: BrianDo2005 | Project: torchsample | Lines: 53 | Source: affine.py


Example 9: F_batch_trilinear_interp3d

def F_batch_trilinear_interp3d(input, coords):
    """
    input : torch.Tensor
        size = (N,H,W,C)
    coords : torch.Tensor
        size = (N,H*W*C,2)
    """
    x = torch.clamp(coords[:,:,0], 0, input.size(2)-2)
    x0 = x.floor()
    x1 = x0 + 1
    y = torch.clamp(coords[:,:,1], 0, input.size(3)-2)
    y0 = y.floor()
    y1 = y0 + 1
    z = torch.clamp(coords[:,:,2], 0, input.size(4)-2)
    z0 = z.floor()
    z1 = z0 + 1

    stride = torch.LongTensor(input.stride())
    x0_ix = x0.mul(stride[2]).long()
    x1_ix = x1.mul(stride[2]).long()
    y0_ix = y0.mul(stride[3]).long()
    y1_ix = y1.mul(stride[3]).long()
    z0_ix = z0.mul(stride[4]).long()
    z1_ix = z1.mul(stride[4]).long()

    input_flat = input.contiguous().view(input.size(0),-1)

    vals_000 = input_flat.gather(1,x0_ix.add(y0_ix).add(z0_ix).detach())
    vals_100 = input_flat.gather(1,x1_ix.add(y0_ix).add(z0_ix).detach())
    vals_010 = input_flat.gather(1,x0_ix.add(y1_ix).add(z0_ix).detach())
    vals_001 = input_flat.gather(1,x0_ix.add(y0_ix).add(z1_ix).detach())
    vals_101 = input_flat.gather(1,x1_ix.add(y0_ix).add(z1_ix).detach())
    vals_011 = input_flat.gather(1,x0_ix.add(y1_ix).add(z1_ix).detach())
    vals_110 = input_flat.gather(1,x1_ix.add(y1_ix).add(z0_ix).detach())
    vals_111 = input_flat.gather(1,x1_ix.add(y1_ix).add(z1_ix).detach())

    xd = x - x0
    yd = y - y0
    zd = z - z0
    xm = 1 - xd
    ym = 1 - yd
    zm = 1 - zd

    x_mapped = (vals_000.mul(xm).mul(ym).mul(zm) +
                vals_100.mul(xd).mul(ym).mul(zm) +
                vals_010.mul(xm).mul(yd).mul(zm) +
                vals_001.mul(xm).mul(ym).mul(zd) +
                vals_101.mul(xd).mul(ym).mul(zd) +
                vals_011.mul(xm).mul(yd).mul(zd) +
                vals_110.mul(xd).mul(yd).mul(zm) +
                vals_111.mul(xd).mul(yd).mul(zd))

    return x_mapped.view_as(input)
Author: BrianDo2005 | Project: torchsample | Lines: 53 | Source: affine.py


Example 10: encode

    def encode(self, x):

        # x = x.view(-1, 1, self.x_size, self.x_size)
        # print (x.shape)

        x = self.act_func(self.conv1(x))

        # print (x.shape)
        x = self.act_func(self.conv2(x))
        x = self.act_func(self.conv3(x))

        # print (x.size())

        x = x.view(-1, self.intermediate_size)

        h1 = self.act_func(self.fc1(x))
        h2 = self.fc2(h1)
        mean = h2[:,:self.z_size]
        logvar = h2[:,self.z_size:]

        # Clamping logvar from below prevents the std from collapsing to 0,
        # which would otherwise produce NaN gradients.
        logvar = torch.clamp(logvar, min=-20.)

        self.mean = mean
        self.logvar = logvar

        return mean, logvar
Author: chriscremer | Project: Other_Code | Lines: 29 | Source: vae_with_policy.py
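The clamp matters because downstream code typically computes std = exp(0.5 * logvar): a very negative logvar underflows std to 0, the subsequent division produces inf, and gradients then turn to NaN. A tiny illustration with made-up values:

import torch

logvar = torch.tensor([-300.0, 0.0])
std = torch.exp(0.5 * logvar)       # first entry underflows to 0 in float32
print(1.0 / std)                    # tensor([inf, 1.])

std_safe = torch.exp(0.5 * torch.clamp(logvar, min=-20.))
print(1.0 / std_safe)               # tensor([22026.4658, 1.0000]) -- finite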


Example 11: calculate_distance_term

def calculate_distance_term(means, n_objects, delta_d, norm=2, usegpu=True):
    """means: bs, n_instances, n_filters"""

    bs, n_instances, n_filters = means.size()

    dist_term = 0.0
    for i in range(bs):
        _n_objects_sample = n_objects[i]

        if _n_objects_sample <= 1:
            continue

        _mean_sample = means[i, : _n_objects_sample, :]  # n_objects, n_filters
        means_1 = _mean_sample.unsqueeze(1).expand(
            _n_objects_sample, _n_objects_sample, n_filters)
        means_2 = means_1.permute(1, 0, 2)

        diff = means_1 - means_2  # n_objects, n_objects, n_filters

        _norm = torch.norm(diff, norm, 2)

        margin = 2 * delta_d * (1.0 - torch.eye(_n_objects_sample))
        if usegpu:
            margin = margin.cuda()
        margin = Variable(margin)

        _dist_term_sample = torch.sum(
            torch.clamp(margin - _norm, min=0.0) ** 2)
        _dist_term_sample = _dist_term_sample / \
            (_n_objects_sample * (_n_objects_sample - 1))
        dist_term += _dist_term_sample

    dist_term = dist_term / bs

    return dist_term
Author: davnov134 | Project: instance-segmentation-pytorch | Lines: 35 | Source: discriminative.py
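A usage sketch with random inputs (the function above must be in scope together with "from torch.autograd import Variable", which the source file imports; usegpu=False keeps the sketch CPU-only):

import torch

means = torch.rand(2, 3, 8)   # batch of 2, up to 3 instances, 8 embedding filters
n_objects = [3, 2]            # instances actually present in each sample
print(calculate_distance_term(means, n_objects, delta_d=1.5, usegpu=False))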


Example 12: hinge_loss

def hinge_loss(positive_predictions, negative_predictions, mask=None):
    """
    Hinge pairwise loss function.

    Parameters
    ----------

    positive_predictions: tensor
        Tensor containing predictions for known positive items.
    negative_predictions: tensor
        Tensor containing predictions for sampled negative items.
    mask: tensor, optional
        A binary tensor used to zero the loss from some entries
        of the loss tensor.

    Returns
    -------

    loss, float
        The mean value of the loss function.
    """

    loss = torch.clamp(negative_predictions -
                       positive_predictions +
                       1.0, 0.0)

    if mask is not None:
        mask = mask.float()
        loss = loss * mask
        return loss.sum() / mask.sum()

    return loss.mean()
Author: AlexMikhalev | Project: spotlight | Lines: 32 | Source: losses.py
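A quick usage sketch with made-up prediction tensors:

import torch

positive = torch.tensor([2.0, 0.5, 1.0])   # scores for known positive items
negative = torch.tensor([0.0, 1.0, 0.9])   # scores for sampled negative items

# Per-pair hinge max(0, negative - positive + 1) gives [0.0, 1.5, 0.9],
# so the mean loss is 0.8
print(hinge_loss(positive, negative))      # tensor(0.8000)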


Example 13: l2_pixel_loss

    def l2_pixel_loss(self, matches_b, non_matches_b, M_pixel=None):
        """
        Apply l2 loss in pixel space.

        This weights non-matches more if they are "far away" in pixel space.

        :param matches_b: A torch.LongTensor with shape torch.Shape([num_matches])
        :param non_matches_b: A torch.LongTensor with shape torch.Shape([num_non_matches])
        :return l2 loss per sample: A torch.FloatTensor with shape torch.Shape([num_matches])
        """

        if M_pixel is None:
            M_pixel = self._config['M_pixel']

        num_non_matches_per_match = len(non_matches_b) // len(matches_b)  # integer division: repeat() needs an int

        ground_truth_pixels_for_non_matches_b = torch.t(matches_b.repeat(num_non_matches_per_match,1)).contiguous().view(-1,1)

        ground_truth_u_v_b = self.flattened_pixel_locations_to_u_v(ground_truth_pixels_for_non_matches_b)
        sampled_u_v_b      = self.flattened_pixel_locations_to_u_v(non_matches_b.unsqueeze(1))

        # each element is always within [0,1], you have 1 if you are at least M_pixel away in
        # L2 norm in pixel space
        norm_degree = 2
        squared_l2_pixel_loss = 1.0/M_pixel * torch.clamp((ground_truth_u_v_b - sampled_u_v_b).float().norm(norm_degree,1), max=M_pixel)


        return squared_l2_pixel_loss, ground_truth_u_v_b, sampled_u_v_b
Author: shooter2062424 | Project: pytorch-dense-correspondence | Lines: 28 | Source: pixelwise_contrastive_loss.py


Example 14: log_Bernoulli

def log_Bernoulli(x, mean, average=False, dim=None):
    probs = torch.clamp( mean, min=min_epsilon, max=max_epsilon )
    log_bernoulli = x * torch.log( probs ) + (1. - x ) * torch.log( 1. - probs )
    if average:
        return torch.mean( log_bernoulli, dim )
    else:
        return torch.sum( log_bernoulli, dim )
Author: jramapuram | Project: vae_vampprior | Lines: 7 | Source: distributions.py
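min_epsilon and max_epsilon are module-level constants in the source project; values around 1e-5 and 1 - 1e-5 would be typical, but that is an assumption here. The clamp keeps both log terms finite even when the predicted mean is exactly 0 or 1:

import torch

min_epsilon, max_epsilon = 1e-5, 1. - 1e-5   # assumed values (see note above)

x = torch.tensor([[0., 1., 1.]])             # observed binary data (made up)
mean = torch.tensor([[0.1, 0.0, 0.8]])       # predicted probabilities; note the exact 0
print(log_Bernoulli(x, mean, dim=1))         # ~tensor([-11.84]); finite thanks to the clamp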


Example 15: pdist

    def pdist(self, fX):
        """Compute pdist à-la scipy.spatial.distance.pdist

        Parameters
        ----------
        fX : (n, d) torch.Tensor
            Embeddings.

        Returns
        -------
        distances : (n * (n-1) / 2,) torch.Tensor
            Condensed pairwise distance matrix
        """

        n_sequences, _ = fX.size()
        distances = []

        for i in range(n_sequences - 1):

            if self.metric in ('cosine', 'angular'):
                d = 1. - F.cosine_similarity(
                    fX[i, :].expand(n_sequences - 1 - i, -1),
                    fX[i+1:, :], dim=1, eps=1e-8)

                if self.metric == 'angular':
                    d = torch.acos(torch.clamp(1. - d, -1 + 1e-6, 1 - 1e-6))

            elif self.metric == 'euclidean':
                d = F.pairwise_distance(
                    fX[i, :].expand(n_sequences - 1 - i, -1),
                    fX[i+1:, :], p=2, eps=1e-06).view(-1)

            distances.append(d)

        return torch.cat(distances)
Author: instinct2k18 | Project: pyannote-audio | Lines: 35 | Source: triplet_loss.py
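The clamp in the 'angular' branch is a common numerical guard: floating-point error can push a cosine similarity just past 1 or -1, where torch.acos returns NaN, so the value is pulled back into [-1 + 1e-6, 1 - 1e-6] first. A standalone sketch of the trick (vectors made up):

import torch
import torch.nn.functional as F

a = torch.tensor([[1.0, 2.0, 3.0]])
b = torch.tensor([[2.0, 4.0, 6.0]])   # parallel to a, so cosine similarity ~ 1.0

cos = F.cosine_similarity(a, b, dim=1)
print(torch.acos(cos))                # 0 if cos == 1.0 exactly, NaN if it drifts above
print(torch.acos(torch.clamp(cos, -1 + 1e-6, 1 - 1e-6)))  # always finite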


Example 16: _get_state_cost

    def _get_state_cost(self, worlds: List[WikiTablesWorld], state: CoverageState) -> torch.Tensor:
        if not state.is_finished():
            raise RuntimeError("_get_state_cost() is not defined for unfinished states!")
        world = worlds[state.batch_indices[0]]

        # Our checklist cost is a sum of squared error from where we want to be, making sure we
        # take into account the mask. We clamp the lower limit of the balance at 0 to avoid
        # penalizing agenda actions produced multiple times.
        checklist_balance = torch.clamp(state.checklist_state[0].get_balance(), min=0.0)
        checklist_cost = torch.sum((checklist_balance) ** 2)

        # This is the number of items on the agenda that we want to see in the decoded sequence.
        # We use this as the denotation cost if the path is incorrect.
        denotation_cost = torch.sum(state.checklist_state[0].checklist_target.float())
        checklist_cost = self._checklist_cost_weight * checklist_cost
        action_history = state.action_history[0]
        batch_index = state.batch_indices[0]
        action_strings = [state.possible_actions[batch_index][i][0] for i in action_history]
        logical_form = world.get_logical_form(action_strings)
        lisp_string = state.extras[batch_index]
        if self._executor.evaluate_logical_form(logical_form, lisp_string):
            cost = checklist_cost
        else:
            cost = checklist_cost + (1 - self._checklist_cost_weight) * denotation_cost
        return cost
Author: apmoore1 | Project: allennlp | Lines: 25 | Source: wikitables_erm_semantic_parser.py


Example 17: get_triplet_loss

    def get_triplet_loss(image_a_pred, image_b_pred, matches_a, matches_b, non_matches_a, non_matches_b, alpha):
        """
        Computes the loss function

        \sum_{triplets} ||D(I_a, u_a, I_b, u_{b,match})||_2^2 - ||D(I_a, u_a, I_b, u_{b,non-match})||_2^2 + alpha

        """
        num_matches = matches_a.size()[0]
        num_non_matches = non_matches_a.size()[0]
        multiplier = num_non_matches // num_matches  # integer division: repeat() needs an int

        ## non_matches_a is already replicated up to be the right size
        ## non_matches_b is also that side
        ## matches_a is just a smaller version of non_matches_a
        ## matches_b is the only thing that needs to be replicated up in size

        matches_b_long =  torch.t(matches_b.repeat(multiplier, 1)).contiguous().view(-1)
                         
        matches_a_descriptors = torch.index_select(image_a_pred, 1, non_matches_a)
        matches_b_descriptors      = torch.index_select(image_b_pred, 1, matches_b_long)
        non_matches_b_descriptors  = torch.index_select(image_b_pred, 1, non_matches_b)

        triplet_losses = (matches_a_descriptors - matches_b_descriptors).pow(2) - (matches_a_descriptors - non_matches_b_descriptors).pow(2) + alpha
        triplet_loss = 1.0 / num_non_matches * torch.clamp(triplet_losses, min=0).sum()

        return triplet_loss
Author: shooter2062424 | Project: pytorch-dense-correspondence | Lines: 26 | Source: pixelwise_contrastive_loss.py


Example 18: project_to_2d

def project_to_2d(X, camera_params):
    """
    Project 3D points to 2D using the Human3.6M camera projection function.
    This is a differentiable and batched reimplementation of the original MATLAB script.
    
    Arguments:
    X -- 3D points in *camera space* to transform (N, *, 3)
    camera_params -- intrinsic parameters (N, 2+2+3+2=9)
    """
    assert X.shape[-1] == 3
    assert len(camera_params.shape) == 2
    assert camera_params.shape[-1] == 9
    assert X.shape[0] == camera_params.shape[0]
    
    while len(camera_params.shape) < len(X.shape):
        camera_params = camera_params.unsqueeze(1)
        
    f = camera_params[..., :2]
    c = camera_params[..., 2:4]
    k = camera_params[..., 4:7]
    p = camera_params[..., 7:]
    
    XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
    r2 = torch.sum(XX[..., :2]**2, dim=len(XX.shape)-1, keepdim=True)

    radial = 1 + torch.sum(k * torch.cat((r2, r2**2, r2**3), dim=len(r2.shape)-1), dim=len(r2.shape)-1, keepdim=True)
    tan = torch.sum(p*XX, dim=len(XX.shape)-1, keepdim=True)

    XXX = XX*(radial + tan) + p*r2
    
    return f*XXX + c
Author: HenrryBryant | Project: VideoPose3D | Lines: 31 | Source: camera.py
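A shape-only usage sketch with random values (N=4 poses, 17 joints each; the 9 intrinsics split as focal length f (2), principal point c (2), radial distortion k (3), and tangential distortion p (2), matching the slicing above):

import torch

X = torch.randn(4, 17, 3)           # 3D joints in camera space
camera_params = torch.randn(4, 9)   # f(2) + c(2) + k(3) + p(2)

points_2d = project_to_2d(X, camera_params)
print(points_2d.shape)              # torch.Size([4, 17, 2])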


Example 19: calculate_variance_term

def calculate_variance_term(pred, gt, means, n_objects, delta_v, norm=2):
    """pred: bs, height * width, n_filters
       gt: bs, height * width, n_instances
       means: bs, n_instances, n_filters"""

    bs, n_loc, n_filters = pred.size()
    n_instances = gt.size(2)

    # bs, n_loc, n_instances, n_filters
    means = means.unsqueeze(1).expand(bs, n_loc, n_instances, n_filters)
    # bs, n_loc, n_instances, n_filters
    pred = pred.unsqueeze(2).expand(bs, n_loc, n_instances, n_filters)
    # bs, n_loc, n_instances, n_filters
    gt = gt.unsqueeze(3).expand(bs, n_loc, n_instances, n_filters)

    _var = (torch.clamp(torch.norm((pred - means), norm, 3) -
                        delta_v, min=0.0) ** 2) * gt[:, :, :, 0]

    var_term = 0.0
    for i in range(bs):
        _var_sample = _var[i, :, :n_objects[i]]  # n_loc, n_objects
        _gt_sample = gt[i, :, :n_objects[i], 0]  # n_loc, n_objects

        var_term += torch.sum(_var_sample) / torch.sum(_gt_sample)
    var_term = var_term / bs

    return var_term
Author: davnov134 | Project: instance-segmentation-pytorch | Lines: 27 | Source: discriminative.py


Example 20: inference_net

    def inference_net(self, x):
        mean_logvar = self.image_encoder2(x)
        mean = mean_logvar[:, :6]
        logvar = mean_logvar[:, 6:6*2]
        xenc = mean_logvar[:, 6*2:]
        logvar = torch.clamp(logvar, min=-15., max=10.)
        return mean, logvar, xenc
Author: chriscremer | Project: Other_Code | Lines: 7 | Source: inference_net_grid.py



Note: The torch.clamp examples in this article were compiled from GitHub, MSDocs, and other source-code hosting platforms. The snippets are drawn from open-source projects and remain the copyright of their original authors; consult each project's license before use or redistribution. Do not repost without permission.

