Python numpy.sum Function Code Examples


This article collects typical usage examples of Python's minpy.numpy.sum function. If you have been wondering how the Python sum function works, how to call it, or what real uses of sum look like, the curated code examples here should help.



Twenty code examples of the sum function are shown below, sorted by popularity by default.
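Since every example below leans on np.sum, here is a minimal sketch of its semantics first. minpy.numpy mirrors the NumPy interface, so plain NumPy stands in; the array values are hypothetical.

import numpy as np  # minpy.numpy mirrors this interface

x = np.array([[1., 2., 3.],
              [4., 5., 6.]])

print(np.sum(x))                         # 21.0 -- sum over all elements
print(np.sum(x, axis=0))                 # [5. 7. 9.] -- column sums
print(np.sum(x, axis=1))                 # [ 6. 15.] -- row sums
print(np.sum(x, axis=1, keepdims=True))  # shape (2, 1), broadcasts against (2, 3)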

Example 1: softmax_loss

def softmax_loss(x, y):
    """
    Computes the loss for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Either of the following:
      - One-hot encoding of labels, of shape (N, C)
      - Label indices of shape (N,), where each y[i] is the label of the
        i-th example (0 <= y[i] < C)

    Returns:
    - loss: Scalar giving the loss
    """
    N = x.shape[0]
    C = x.shape[1]
    if len(y.shape) == 1:
        #convert it to one hot encoding
        onehot_y = np.zeros([N, C])
        np.onehot_encode(y, onehot_y)
    else:
        onehot_y = y
    # shifted log-scores: subtracting the row max keeps exp() numerically stable
    probs = x - np.max(x, axis=1, keepdims=True)
    # cross-entropy: -(true-class score) + log-sum-exp of all scores, averaged over N
    loss = -np.sum(probs * onehot_y) / N
    loss += np.sum(np.log(np.sum(np.exp(probs), axis=1, keepdims=True))) / N
    return loss
Author: ZihengJiang | Project: minpy | Lines: 27 | Source: layers.py
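A hypothetical call, assuming the function above is in scope and minpy is installed (the integer-label branch triggers the minpy-specific np.onehot_encode conversion):

import minpy.numpy as np

scores = np.random.randn(4, 3)       # N=4 examples, C=3 classes, random for illustration
labels = np.array([0, 2, 1, 0])      # integer labels; converted to one-hot inside
print(softmax_loss(scores, labels))  # scalar; roughly log(3) ~ 1.1 for random scores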


Example 2: svm_loss

def svm_loss(x, y, mode):
  """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C
  - mode: 'cpu' forces the pure-NumPy execution policy; any other value
    prefers the MXNet policy

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  if mode == 'cpu':
    np.set_policy(policy.OnlyNumpyPolicy())
  else:
    np.set_policy(policy.PreferMXNetPolicy())

  N = x.shape[0]
  correct_class_scores = x[np.arange(N), y]
  
  #margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
  margins = np.maximum(0, x - np.expand_dims(correct_class_scores, axis = 1) + 1.0)

  margins[np.arange(N), y] = 0
  loss = np.sum(margins) / N
  num_pos = np.sum(margins > 0, axis=1)
  dx = np.zeros_like(x)
  dx[margins > 0] = 1
  dx[np.arange(N), y] -= num_pos
  dx /= N

  return loss, dx
Author: HrWangChengdu | Project: CS231n | Lines: 34 | Source: layers_hack_layer_level.py
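The dx bookkeeping above implements the multiclass hinge-loss gradient: every class with a violated margin gets +1, and the correct class is decremented once per violation. A self-contained plain-NumPy sketch with hypothetical scores makes this concrete:

import numpy as np

x = np.array([[3.0, 1.0, 2.5]])  # one example, three class scores
y = np.array([0])                # class 0 is correct
N = x.shape[0]

margins = np.maximum(0, x - x[np.arange(N), y][:, None] + 1.0)
margins[np.arange(N), y] = 0     # the correct class contributes no margin
dx = np.zeros_like(x)
dx[margins > 0] = 1              # +1 for each class that violates the margin
dx[np.arange(N), y] -= np.sum(margins > 0, axis=1)
print(margins)                   # [[0.  0.  0.5]] -- only class 2 violates
print(dx / N)                    # [[-1.  0.  1.]]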


Example 3: softmax_loss

def softmax_loss(x, y):
  """
  Computes the loss and gradient for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
  #np.expand_dims(correct_class_scores, axis = 1)
  #probs = np.exp(x - np.max(x, axis=1, keepdims=True))
  #print "x.shape", x.shape

  #Somehow Buggy. Max doesn't work.
  probs = np.exp(x - np.max(x, axis=1))
  #probs /= np.expand_dims(np.sum(probs, axis=1), axis = 1)
  probs /= np.expand_dims(np.sum(probs, axis=1), axis = 1)
  N = x.shape[0]
  loss = -np.sum(np.log(probs[np.arange(N), y])) / N

  dx = probs.copy()
  dx[np.arange(N), y] -= 1
  dx /= N

  return loss, dx
Author: ZijiaLewisLu | Project: HeartDeep-Kaggle-DSB2 | Lines: 30 | Source: layers_test.py
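The "Max doesn't work" comment above is a broadcasting issue: np.max(x, axis=1) has shape (N,), which cannot be subtracted from the (N, C) scores directly. keepdims=True (or np.expand_dims, as the example uses for the division) is the usual fix; a plain-NumPy sketch:

import numpy as np

x = np.random.randn(4, 3)
# x - np.max(x, axis=1)                        # ValueError in NumPy: (4, 3) vs (4,)
stable = x - np.max(x, axis=1, keepdims=True)  # (4, 3) - (4, 1) broadcasts row-wise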


Example 4: svm_loss

def svm_loss(x, y):
  """
  Computes the loss for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss
  """

  N = x.shape[0]
  correct_class_scores = x[np.arange(N), y]
  
  #TODO: Support broadcast case: (X,) (X, Y)
  #shape(x) is (d0, d1)
  #shape(correct_class_scores) is (d0,)
  #margins = np.maximum(0, x - correct_class_scores + 1.0)
  margins = np.transpose(np.maximum(0, np.transpose(x) - np.transpose(correct_class_scores) + 1.0))

  loss = (np.sum(margins) - np.sum(margins[np.arange(N), y])) / N

  return loss
Author: ZijiaLewisLu | Project: HeartDeep-Kaggle-DSB2 | Lines: 27 | Source: layers.py


Example 5: quick_grad_check

def quick_grad_check(fun, arg0, extra_args=(), kwargs={}, verbose=True,
                     eps=EPS, rtol=RTOL, atol=ATOL, rs=None):
    """Checks the gradient of a function (w.r.t. to its first arg) in a random direction"""

    if verbose:
        print("Checking gradient of {0} at {1}".format(fun, arg0))

    if rs is None:
        rs = nnp.random.RandomState()
    
    random_dir = rs.standard_normal(nnp.shape(arg0))
    random_dir = random_dir / nnp.sqrt(nnp.sum(random_dir * random_dir))
 
    if not extra_args == ():
      unary_fun = lambda x : fun(arg0 + x * random_dir, extra_args)
      numeric_grad = (unary_fun(eps/2) - unary_fun(-eps/2)) / eps
      analytic_grad = np.sum(grad(fun)(arg0, extra_args) * random_dir)
    else:
      unary_fun = lambda x : fun(arg0 + x * random_dir)
      numeric_grad = (unary_fun(eps/2) - unary_fun(-eps/2)) / eps
      analytic_grad = np.sum(grad(fun)(arg0) * random_dir)
  
    if isinstance(numeric_grad, minpy.array.Number):
        assert abs((analytic_grad - numeric_grad).get_data(None)) < atol and abs((analytic_grad - numeric_grad).get_data(None)) < abs((analytic_grad * rtol).get_data(None)), \
            "Check failed! nd={0}, ad={1}".format(numeric_grad, analytic_grad)
    elif isinstance(numeric_grad, minpy.array.Array):
        assert nnp.prod(nnp.shape(analytic_grad.asnumpy())[:]) == 1, "Currently only support check loss"
        assert abs((analytic_grad - numeric_grad).asnumpy()) < atol and abs((analytic_grad - numeric_grad).asnumpy()) < abs((analytic_grad * rtol).asnumpy()), \
            "Check failed! nd={0}, ad={1}".format(numeric_grad, analytic_grad)
    else:
        assert False
    if verbose:
        print("Gradient projection OK (numeric grad: {0}, analytic grad: {1})".format(
            numeric_grad, analytic_grad))
Author: Vanova | Project: minpy | Lines: 34 | Source: gradient_checker.py
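The check above compares a central finite difference along a random unit direction with the projection of the analytic gradient onto that direction. The same idea, stripped to plain NumPy with a toy loss (hypothetical, for illustration):

import numpy as np

def loss(w):
    return np.sum(w ** 2)            # toy loss; analytic gradient is 2 * w

def grad_loss(w):
    return 2 * w

rs = np.random.RandomState(0)
w = rs.standard_normal(5)
d = rs.standard_normal(5)
d /= np.sqrt(np.sum(d * d))          # random unit direction

eps = 1e-4
numeric = (loss(w + (eps / 2) * d) - loss(w - (eps / 2) * d)) / eps
analytic = np.sum(grad_loss(w) * d)  # directional derivative along d
assert abs(numeric - analytic) < 1e-6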


Example 6: softmax

def softmax(x, y):
    import numpy as np
    y = y.astype(int)
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N
    return loss
Author: lryta | Project: minpy | Lines: 8 | Source: test_customop.py


Example 7: train_loss

    def train_loss(X, y, W1, W2, b1, b2):
        l1 = affine_relu_forward(X, W1, b1)
        l2 = affine_forward(l1, W2, b2)
        scores = l2
        if y is None:
            # test mode: no labels, just return the class scores
            return scores
        #[TODO]: softmax is not supported yet
        # loss, d_scores = softmax_loss(scores, y)
        loss = svm_loss(scores, y)
        loss_with_reg = loss + np.sum(W1 ** 2) * 0.5 * self.reg \
                             + np.sum(W2 ** 2) * 0.5 * self.reg
        return loss_with_reg
Author: colinsongf | Project: minpy | Lines: 11 | Source: fc_net_minpy.py


Example 8: train_loss

    def train_loss(X, y, W1, W2, b1, b2):
      l1, l1_cache = affine_relu_forward(X, W1, b1)
      l2, l2_cache = affine_forward(l1, W2, b2)
      scores = l2 

      if y is None:
        return scores
   
      loss, d_scores = softmax_loss(scores, y)
      loss += np.sum(W1 ** 2) * 0.5 * self.reg
      loss += np.sum(W2 ** 2) * 0.5 * self.reg
      return loss
Author: HrWangChengdu | Project: CS231n | Lines: 12 | Source: fc_net_minpy.py


Example 9: train_loss

    def train_loss(*args):
      inputs = args[0]
      softmax_label = args[1]
      probs = self.symbol_func(**self.make_mxnet_weight_dict(inputs, softmax_label, args[self.data_target_cnt:len(args)]))
      if softmax_label is None:
        return probs 

      samples_num = X.shape[0]  # X is captured from the enclosing training scope
      targets = np.zeros((samples_num, self.num_classes))
      targets[np.arange(samples_num), softmax_label] = 1
      loss = -np.sum(targets * np.log(probs)) / samples_num
      for i in self.get_index_reg_weight():
        loss = loss + np.sum(0.5*args[i]**2*self.reg)

      return loss
Author: ZijiaLewisLu | Project: HeartDeep-Kaggle-DSB2 | Lines: 15 | Source: cnn_minpy.py


Example 10: affine_backward

def affine_backward(dout, cache):
  """
  Computes the backward pass for an affine layer.

  Inputs:
  - dout: Upstream derivative, of shape (N, M)
  - cache: Tuple of:
    - x: Input data, of shape (N, d_1, ..., d_k)
    - w: Weights, of shape (D, M)
    - b: Biases, of shape (M,)

  Returns a tuple of:
  - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
  - dw: Gradient with respect to w, of shape (D, M)
  - db: Gradient with respect to b, of shape (M,)
  """
  x, w, b = cache
  x_plain = np.reshape(x, (x.shape[0], -1))

  db = np.sum(dout, axis=0)

  dx_plain = np.dot(dout, np.transpose(w))

  dx = np.reshape(dx_plain, x.shape)
  dw = np.dot(np.transpose(x_plain), dout)

  return dx, dw, db
Author: ZijiaLewisLu | Project: HeartDeep-Kaggle-DSB2 | Lines: 26 | Source: layers_test.py
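A quick shape check of affine_backward with hypothetical dimensions (plain NumPy works here, since the body only uses reshape, sum, dot, and transpose):

import numpy as np

N, M = 4, 3
x = np.random.randn(N, 2, 3)         # d_1=2, d_2=3, so D = 6 after flattening
w = np.random.randn(6, M)
b = np.random.randn(M)
dout = np.random.randn(N, M)

dx, dw, db = affine_backward(dout, (x, w, b))
print(dx.shape, dw.shape, db.shape)  # (4, 2, 3) (6, 3) (3,)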


Example 11: softmax_cross_entropy

def softmax_cross_entropy(prob, label):
    """
    Computes the cross entropy for softmax activation.

    Inputs:
    - prob: Probability, of shape (N, C) where x[i, j] is the probability for the jth class
      for the ith input.
    - label: Either of the following:
      - One hot encoding of labels, of shape (N, C)
      - Label index of shape (N, ), each y[i] is the label of i^th example
        (0 <= y[i] < C)

    Returns:
    - cross_entropy
    """

    N = prob.shape[0]
    C = prob.shape[1]
    if len(label.shape) == 1:
        #convert it to one hot encoding
        onehot_label = np.zeros([N, C])
        np.onehot_encode(label, onehot_label)
    else:
        onehot_label = label
    return -np.sum(np.log(prob) * onehot_label) / N
Author: HrWangChengdu | Project: minpy | Lines: 25 | Source: layers.py


Example 12: check_accuracy

    def check_accuracy(self, dataiter, num_samples=None):
        """
        Check accuracy of the model on the provided data.

        Inputs:
        - dataiter: data iterator that can produce batches.
        - num_samples: If not None and dataiter has more than num_samples datapoints,
          subsample the data and only test the model on num_samples datapoints.

        Returns:
        - acc: Scalar giving the fraction of instances that were correctly
          classified by the model.
        """

        # Maybe subsample the data
        N = dataiter.num_data
        check_dataiter = dataiter
        if num_samples is not None and N > num_samples:
            # Sample a sub iter
            check_dataiter = dataiter.getsubiter(num_samples)
        else:
            # Use the entire dataiter otherwise.
            check_dataiter.reset()

        acc_count = 0
        num_samples = 0
        for each_batch in check_dataiter:
            predict = self.model.forward_batch(
                each_batch, mode='test').asnumpy()
            # TODO(minjie): multiple labels.
            acc_count += np.sum(
                np.argmax(
                    predict, axis=1) == each_batch.label[0])
            num_samples += check_dataiter.batch_size
        return float(acc_count.asnumpy()) / num_samples
Author: ZihengJiang | Project: minpy | Lines: 35 | Source: solver.py


Example 13: log_likelihood

def log_likelihood(weights, inputs, targets):
    logprobs = outputs(weights, inputs)
    loglik = 0.0
    num_time_steps, num_examples, _ = inputs.shape
    for t in range(num_time_steps):
        loglik += np.sum(logprobs[t] * targets[t])
    return loglik / (num_time_steps * num_examples)
Author: pombredanne | Project: minpy | Lines: 7 | Source: lstm.py


Example 14: loss

    def loss(caffe_layer_specs, X, T):
        # original code:
        # log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_prior = 0
        for caffe_layer in caffe_layer_specs:
            log_prior += -L2_reg * np.dot(caffe_layer.get_learnable_params()[0], caffe_layer.get_learnable_params()[0])

        log_lik = np.sum(predictions(caffe_layer_specs, X) * T)
        return - log_prior - log_lik
Author: pombredanne | Project: minpy | Lines: 9 | Source: convnet_adv.py


Example 15: grad

def grad(g):
    # x and label are captured from the enclosing custom-op scope
    import numpy as np
    y = label.astype(int)
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N
    return dx
Author: lryta | Project: minpy | Lines: 10 | Source: test_customop.py


Example 16: softmax_cross_entropy

def softmax_cross_entropy(prob, label):
    N = prob.shape[0]
    C = prob.shape[1]
    if len(label.shape) == 1:
        #convert it to one hot encoding
        onehot_label = np.zeros([N, C])
        np.onehot_encode(label, onehot_label)
    else:
        onehot_label = label
    return -np.sum(np.log(prob) * onehot_label) / N
Author: ZihengJiang | Project: minpy | Lines: 10 | Source: layers.py


Example 17: softmax_loss

def softmax_loss(x, y):
  """
  Computes the loss for softmax classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns:
  - loss: Scalar giving the loss
  """
  #TODO: Missing Max Operator 
  probs = np.exp(x - np.expand_dims(np.max(x, axis=1), axis = 1))
  probs = probs / np.expand_dims(np.sum(probs, axis=1), axis = 1)
  N = x.shape[0]
  loss = -np.sum(np.log(probs[np.arange(N), y])) / N

  return loss
Author: ZijiaLewisLu | Project: HeartDeep-Kaggle-DSB2 | Lines: 20 | Source: layers.py


Example 18: l2_loss

def l2_loss(x, label):
    """
    The Mean Square Error loss for regression.
    """
    N = x.shape[0]
    C = x.shape[1]
    if len(label.shape) == 1:
        #convert it to one hot encoding
        onehot_label = np.zeros([N, C])
        np.onehot_encode(label, onehot_label)
    else:
        onehot_label = label
    return np.sum((x - onehot_label) ** 2) / N
Author: ZihengJiang | Project: minpy | Lines: 13 | Source: layers.py
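A worked numeric call (hypothetical values), passing labels that are already one-hot so the minpy-specific np.onehot_encode branch is skipped:

import numpy as np

x = np.array([[0.9, 0.1],
              [0.2, 0.8]])      # predictions, N=2, C=2
onehot = np.array([[1.0, 0.0],
                   [0.0, 1.0]])  # one-hot labels
print(l2_loss(x, onehot))        # (2*0.1**2 + 2*0.2**2) / 2 = 0.05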


Example 19: temporal_softmax_loss

def temporal_softmax_loss(x, y, mask, verbose=False):
    """
    A temporal version of softmax loss for use in RNNs. We assume that we are
    making predictions over a vocabulary of size V for each timestep of a
    timeseries of length T, over a minibatch of size N. The input x gives scores
    for all vocabulary elements at all timesteps, and y gives the indices of the
    ground-truth element at each timestep. We use a cross-entropy loss at each
    timestep, summing the loss over all timesteps and averaging across the
    minibatch.

    As an additional complication, we may want to ignore the model output at some
    timesteps, since sequences of different length may have been combined into a
    minibatch and padded with NULL tokens. The optional mask argument tells us
    which elements should contribute to the loss.

    Inputs:
    - x: Input scores, of shape (N, T, V)
    - y: Ground-truth indices, of shape (N, T) where each element is in the range
       0 <= y[i, t] < V
    - mask: Boolean array of shape (N, T) where mask[i, t] tells whether or not
    the scores at x[i, t] should contribute to the loss.

    Returns:
    - loss: Scalar giving the loss
    """
    N, T, V = x.shape

    x_flat = x.reshape(N * T, V)
    y_flat = y.reshape(N * T)
    mask_flat = mask.reshape(N * T)

    probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))
    probs = probs / np.sum(probs, axis=1, keepdims=True)
    loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N

    return loss
Author: ZihengJiang | Project: minpy | Lines: 37 | Source: layers.py
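A hypothetical call showing the mask: with one padded position masked out, its log-probability is excluded from the loss. Plain NumPy suffices, since the body only uses reshape, exp, max, sum, log, and arange.

import numpy as np

N, T, V = 2, 3, 5
rs = np.random.RandomState(0)
x = rs.randn(N, T, V)                      # scores for every vocab entry at every timestep
y = rs.randint(V, size=(N, T))             # ground-truth indices
mask = np.ones((N, T), dtype=bool)
mask[1, 2] = False                         # padded position: excluded from the loss

print(temporal_softmax_loss(x, y, mask))   # scalar, summed over T, averaged over N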


Example 20: check_accuracy

    def check_accuracy(self, dataiter, num_samples=None):
        """
        Check accuracy of the model on the provided data.

        Parameters
        ----------
        dataiter
            data iterator that can produce batches.
        num_samples
            If not None and dataiter has more than num_samples datapoints,
            subsample the data and only test the model on num_samples datapoints.

        Returns
        -------
        acc
            Scalar giving the fraction of instances that were correctly
            classified by the model.
        """
        # Maybe subsample the data
        N = dataiter.num_data
        check_dataiter = dataiter
        if num_samples is not None and N > num_samples:
            # Sample a sub iter
            check_dataiter = dataiter.getsubiter(num_samples)
        else:
            # Use the entire dataiter otherwise.
            check_dataiter.reset()

        if self.task_type == 'classification':
            acc_count = 0
            num_samples = 0
            for each_batch in check_dataiter:
                predict = self.model.forward_batch(each_batch, mode='test').asnumpy()
                # TODO(minjie): multiple labels.
                acc_count += np.sum(np.argmax(predict, axis=1) == each_batch.label[0])
                num_samples += check_dataiter.batch_size
            return float(acc_count.asnumpy()) / num_samples
        elif self.task_type == 'regression':
            loss = 0
            batch_count = 0
            for each_batch in check_dataiter:
                predict = self.model.forward_batch(each_batch, mode='test').asnumpy()
                loss += self.model.loss(predict, each_batch.label[0])
                batch_count += 1
            return float(loss.asnumpy()) / batch_count
        else:
            raise ValueError('Task type is either classification or regression.')
Author: HrWangChengdu | Project: minpy | Lines: 47 | Source: solver.py



Note: The minpy.numpy.sum examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

