
Python math_ops.log Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.log. If you have been wondering what exactly math_ops.log does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of the log function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
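
For orientation, here is a minimal, self-contained sketch of calling math_ops.log directly. It assumes a TensorFlow 1.x environment (where tensorflow.python.ops.math_ops is the internal module these examples use); the input values are illustrative only.

import tensorflow as tf
from tensorflow.python.ops import math_ops

# math_ops.log computes the element-wise natural logarithm.
x = tf.constant([1.0, 2.718281828, 10.0])
y = math_ops.log(x)

with tf.Session() as sess:
  print(sess.run(y))  # approximately [0.0, 1.0, 2.3025851]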

Example 1: log_loss

def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as `predictions`.
    weights: Coefficients for the loss. This can be a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "log_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = -math_ops.multiply(
        labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weights, scope=scope)
Developer: ThunderQi | Project: tensorflow | Lines: 35 | Source: loss_ops.py
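
In equation form, the per-element loss computed above is the standard binary cross-entropy with an epsilon guard against log(0) (notation mine, not from the source):

\[
\ell(y, \hat{y}) \;=\; -\,y \,\log(\hat{y} + \epsilon) \;-\; (1 - y)\,\log(1 - \hat{y} + \epsilon)
\]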


Example 2: _inverse_log_det_jacobian

  def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):
    if not y.shape.is_fully_defined():
      raise ValueError("Input must have shape known at graph construction.")
    input_shape = np.int32(y.shape.as_list())

    if not self.batchnorm.built:
      # Create variables.
      self.batchnorm.build(input_shape)

    event_dims = self.batchnorm.axis
    reduction_axes = [i for i in range(len(input_shape)) if i not in event_dims]

    if use_saved_statistics or not self._training:
      log_variance = math_ops.log(
          self.batchnorm.moving_variance + self.batchnorm.epsilon)
    else:
      # At training-time, ildj is computed from the mean and log-variance across
      # the current minibatch.
      _, v = nn.moments(y, axes=reduction_axes, keepdims=True)
      log_variance = math_ops.log(v + self.batchnorm.epsilon)

    # `gamma` and `log Var(y)` reductions over event_dims.
    # Log(total change in area from gamma term).
    log_total_gamma = math_ops.reduce_sum(math_ops.log(self.batchnorm.gamma))

    # Log(total change in area from log-variance term).
    log_total_variance = math_ops.reduce_sum(log_variance)
    # The ildj is a scalar, as it does not depend on the values of x and is
    # constant across minibatch elements.
    return log_total_gamma - 0.5 * log_total_variance
Developer: Albert-Z-Guo | Project: tensorflow | Lines: 30 | Source: batch_normalization.py
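
Written out, the value returned is the inverse log-determinant Jacobian of the batch-norm scaling, reduced over the event dimensions (my notation), where \(\gamma\) is the batch-norm scale and \(\sigma^2\) the (moving or minibatch) variance:

\[
\log\left|\det J^{-1}\right| \;=\; \sum_i \log \gamma_i \;-\; \frac{1}{2} \sum_i \log\big(\sigma_i^2 + \epsilon\big)
\]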


Example 3: _log_prob

  def _log_prob(self, x):
    y = (x - self.mu) / self.sigma
    half_df = 0.5 * self.df
    return (math_ops.lgamma(0.5 + half_df) - math_ops.lgamma(half_df) - 0.5 *
            math_ops.log(self.df) - 0.5 * math.log(math.pi) -
            math_ops.log(self.sigma) -
            (0.5 + half_df) * math_ops.log(1. + math_ops.square(y) / self.df))
Developer: kadeng | Project: tensorflow | Lines: 7 | Source: student_t.py
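
The return value is the log density of a Student's t distribution with df degrees of freedom \(\nu\), location \(\mu\), and scale \(\sigma\); in standard notation:

\[
\log p(x) \;=\; \log\Gamma\!\Big(\frac{\nu+1}{2}\Big) - \log\Gamma\!\Big(\frac{\nu}{2}\Big) - \frac{1}{2}\log\nu - \frac{1}{2}\log\pi - \log\sigma - \frac{\nu+1}{2}\log\!\Big(1 + \frac{y^2}{\nu}\Big), \qquad y = \frac{x - \mu}{\sigma}
\]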


Example 4: _sample_n

  def _sample_n(self, n, seed=None):
    sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
    logits = self.logits * array_ops.ones(sample_shape)
    logits_2d = array_ops.reshape(logits, [-1, self.event_size])
    np_dtype = self.dtype.as_numpy_dtype

    # Uniform variates must be sampled from the interval (0,1] rather than
    # [0,1], as they are passed through log() to compute Gumbel variates.
    # We need to use np.finfo(np_dtype).tiny because it is the smallest,
    # positive, "normal" number.  A "normal" number is such that the mantissa
    # has an implicit leading 1.  Normal, positive numbers x, y have the
    # reasonable property that: x + y >= max(x, y).
    # Note that minval=np.nextafter(np.float32(0), 1) can cause
    # tf.random_uniform(dtype=tf.float32) to sample 0.

    uniform = random_ops.random_uniform(shape=array_ops.shape(logits_2d),
                                        minval=np.finfo(np_dtype).tiny,
                                        maxval=1,
                                        dtype=self.dtype,
                                        seed=seed)
    gumbel = -math_ops.log(-math_ops.log(uniform))
    noisy_logits = math_ops.div(gumbel + logits_2d, self._temperature_2d)
    samples = nn_ops.log_softmax(noisy_logits)
    ret = array_ops.reshape(samples, sample_shape)
    return ret
Developer: Jackhuang945 | Project: tensorflow | Lines: 25 | Source: relaxed_onehot_categorical.py
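
The sampling above is the Gumbel-softmax (Concrete) trick: with \(u \sim \text{Uniform}(0, 1]\), \(g = -\log(-\log u)\) is a standard Gumbel variate, and the relaxed one-hot sample is a tempered softmax over the perturbed logits, returned here in log space:

\[
g = -\log(-\log u), \qquad z = \operatorname{log\_softmax}\!\left(\frac{g + \text{logits}}{\tau}\right)
\]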


Example 5: log_prob

  def log_prob(self, x, name="log_prob"):
    """Log prob of observations in `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.

    Raises:
      TypeError: if `x` and `alpha` are different dtypes.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta, x], name):
        alpha = self._alpha
        beta = self._beta
        x = ops.convert_to_tensor(x)
        x = control_flow_ops.with_dependencies(
            [check_ops.assert_positive(x)] if self.strict else [],
            x)
        contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
                                                    dtype=self.dtype)

        return (alpha * math_ops.log(beta) + (alpha - 1) * math_ops.log(x) -
                beta * x - math_ops.lgamma(self._alpha))
Developer: 31H0B1eV | Project: tensorflow | Lines: 26 | Source: gamma.py
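
The returned expression is the Gamma log density with concentration \(\alpha\) and rate \(\beta\):

\[
\log p(x) \;=\; \alpha \log \beta + (\alpha - 1)\log x - \beta x - \log\Gamma(\alpha)
\]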


Example 6: _BetaincGrad

def _BetaincGrad(op, grad):
  """Returns gradient of betainc(a, b, x) with respect to x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
  a, b, x = op.inputs

  # Two cases: x is a scalar and a/b are same-shaped tensors, or vice
  # versa; so it's sufficient to check against shape(a).
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  _, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # pylint: enable=protected-access

  # Perform operations in log space before summing, because terms
  # can grow large.
  log_beta = (
      gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -
      gen_math_ops.lgamma(a + b))
  partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +
                           (a - 1) * math_ops.log(x) - log_beta)

  # TODO(b/36815900): Mark None return values as NotImplemented
  return (
      None,  # da
      None,  # db
      array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Developer: neuroradiology | Project: tensorflow | Lines: 26 | Source: math_grad.py
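
The gradient being computed is the derivative of the regularized incomplete beta function with respect to x, evaluated in log space to avoid overflow:

\[
\frac{\partial}{\partial x} I_x(a, b) \;=\; \frac{x^{a-1}(1-x)^{b-1}}{B(a, b)} \;=\; \exp\!\big((a-1)\log x + (b-1)\log(1-x) - \log B(a, b)\big)
\]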


Example 7: compute_step

    def compute_step(x_val, geometric=False):
      if geometric:
        # Consider geometric series where t_mul != 1
        # 1 + t_mul + t_mul^2 ... = (1 - t_mul^i_restart) / (1 - t_mul)

        # First find how many restarts were performed for a given x_val
        # Find maximal integer i_restart value for which this equation holds
        # x_val >= (1 - t_mul^i_restart) / (1 - t_mul)
        # x_val * (1 - t_mul) <= (1 - t_mul^i_restart)
        # t_mul^i_restart <= (1 - x_val * (1 - t_mul))

        # TensorFlow only provides the natural logarithm, so:
        # i_restart <= log(1 - x_val * (1 - t_mul)) / log(t_mul)
        # Find how many restarts were performed

        i_restart = math_ops.floor(
            math_ops.log(c_one - x_val * (c_one - t_mul)) / math_ops.log(t_mul))
        # Compute the sum of all restarts before the current one
        sum_r = (c_one - t_mul ** i_restart) / (c_one - t_mul)
        # Compute our position within the current restart
        x_val = (x_val - sum_r) / t_mul ** i_restart

      else:
        # Find how many restarts were performed
        i_restart = math_ops.floor(x_val)
        # Compute our position within the current restart
        x_val = x_val - i_restart
      return i_restart, x_val
Developer: 1000sprites | Project: tensorflow | Lines: 28 | Source: sgdr_learning_rate_decay.py
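
Spelling out the derivation sketched in the comments: with \(t\) the t_mul factor (\(t > 1\)), solving \(x \ge \frac{1 - t^{i}}{1 - t}\) for the number of completed restarts \(i\) gives

\[
t^{i} \;\le\; 1 - x\,(1 - t) \quad\Longrightarrow\quad i \;\le\; \frac{\log\big(1 - x\,(1 - t)\big)}{\log t},
\]

so i_restart is the floor of the right-hand side.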


Example 8: log_prob

  def log_prob(self, counts, name="log_prob"):
    """`Log(P[counts])`, computed for every batch member.

    For each batch member of counts `k`, `P[counts]` is the probability that
    after sampling `n` draws from this Binomial distribution, the number of
    successes is `k`.  Note that different sequences of draws can result in the
    same counts, thus the probability includes a combinatorial coefficient.

    Args:
      counts:  Non-negative tensor with dtype `dtype` and whose shape can be
        broadcast with `self.p` and `self.n`. `counts` is only legal if it is
        less than or equal to `n` and its components are equal to integer
        values.
      name:  Name to give this Op, defaults to "log_prob".

    Returns:
      Log probabilities for each record, shape `[N1,...,Nm]`.
    """
    n = self._n
    p = self._p
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self._n, self._p, counts]):
        counts = self._check_counts(counts)

        prob_prob = counts * math_ops.log(p) + (
            n - counts) * math_ops.log(1 - p)

        combinations = math_ops.lgamma(n + 1) - math_ops.lgamma(
            counts + 1) - math_ops.lgamma(n - counts + 1)
        log_prob = prob_prob + combinations
        return log_prob
Developer: alephman | Project: Tensorflow | Lines: 31 | Source: binomial.py
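
The value returned is the Binomial log pmf, with the combinatorial coefficient expressed through log-gamma:

\[
\log P(k) \;=\; k \log p + (n - k)\log(1 - p) + \log\binom{n}{k}, \qquad \log\binom{n}{k} = \log\Gamma(n{+}1) - \log\Gamma(k{+}1) - \log\Gamma(n{-}k{+}1)
\]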


Example 9: _inverse

  def _inverse(self, y):
    y = self._maybe_assert_valid_y(y)
    if self.power == 0.:
      return math_ops.log(y)
    # If large y accuracy is an issue, consider using:
    # (y**self.power - 1.) / self.power when y >> 1.
    return math_ops.expm1(math_ops.log(y) * self.power) / self.power
Developer: Ajaycs99 | Project: tensorflow | Lines: 7 | Source: power_transform.py
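
This is the inverse of the power transform, computed stably via expm1 for power \(p \ne 0\), with the \(p = 0\) case falling back to the limit \(\log y\):

\[
g^{-1}(y) \;=\; \frac{y^{p} - 1}{p} \;=\; \frac{\exp(p \log y) - 1}{p}, \qquad g^{-1}(y) \xrightarrow{\,p \to 0\,} \log y
\]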


Example 10: _forward_log_det_jacobian

  def _forward_log_det_jacobian(self, x):
    x = self._maybe_assert_valid_x(x)
    return (
        -(x / self.scale) ** self.concentration +
        (self.concentration - 1) * math_ops.log(x) +
        math_ops.log(self.concentration) +
        -self.concentration * math_ops.log(self.scale))
Developer: Jackiefan | Project: tensorflow | Lines: 7 | Source: weibull.py
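
With concentration \(k\) and scale \(\lambda\), this is the log derivative of the Weibull CDF \(F(x) = 1 - e^{-(x/\lambda)^{k}}\):

\[
\log F'(x) \;=\; -\Big(\frac{x}{\lambda}\Big)^{k} + (k - 1)\log x + \log k - k \log\lambda
\]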


Example 11: _sample_n

  def _sample_n(self, n, seed=None):
    shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
    np_dtype = self.dtype.as_numpy_dtype()
    minval = np.nextafter(np_dtype(0), np_dtype(1))
    uniform = random_ops.random_uniform(
        shape=shape, minval=minval, maxval=1, dtype=self.dtype, seed=seed)
    sampled = -math_ops.log(-math_ops.log(uniform))
    return sampled * self.scale + self.loc
Developer: ppwwyyxx | Project: tensorflow | Lines: 7 | Source: gumbel.py
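
This is inverse-CDF sampling from the Gumbel distribution with location \(\mu\) (loc) and scale \(\beta\):

\[
u \sim \text{Uniform}(0, 1], \qquad x = \mu - \beta \log(-\log u)
\]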


Example 12: logloss

def logloss(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  losses = math_ops.multiply(y_true, math_ops.log(y_pred + K.epsilon()))
  losses += math_ops.multiply((1 - y_true),
                              math_ops.log(1 - y_pred + K.epsilon()))
  return K.mean(-losses, axis=-1)
Developer: terrytangyuan | Project: tensorflow | Lines: 7 | Source: losses.py


Example 13: _phi

def _phi(r, order):
  """Coordinate-wise nonlinearity used to define the order of the interpolation.

  See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

  Args:
    r: input op
    order: interpolation order

  Returns:
    phi_k evaluated coordinate-wise on r, for k = r
  """

  # Using EPSILON prevents log(0), sqrt(0), etc.
  # sqrt(0) is well-defined, but its gradient is not.
  with ops.name_scope('phi'):
    if order == 1:
      r = math_ops.maximum(r, EPSILON)
      r = math_ops.sqrt(r)
      return r
    elif order == 2:
      return 0.5 * r * math_ops.log(math_ops.maximum(r, EPSILON))
    elif order == 4:
      return 0.5 * math_ops.square(r) * math_ops.log(
          math_ops.maximum(r, EPSILON))
    elif order % 2 == 0:
      r = math_ops.maximum(r, EPSILON)
      return 0.5 * math_ops.pow(r, 0.5 * order) * math_ops.log(r)
    else:
      r = math_ops.maximum(r, EPSILON)
      return math_ops.pow(r, 0.5 * order)
Developer: Ajaycs99 | Project: tensorflow | Lines: 31 | Source: interpolate_spline.py
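
Reading r as a squared distance \(d^2\) (my interpretation of the code, since order 1 takes a square root), the branches implement the standard polyharmonic kernel:

\[
\varphi_k(d) \;=\; \begin{cases} d^{k}, & k \text{ odd},\\ d^{k}\log d, & k \text{ even}. \end{cases}
\]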


Example 14: log_prob

  def log_prob(self, x, name="log_prob"):
    """`Log(P[counts])`, computed for every batch member.

    Args:
      x:  Non-negative floating point tensor whose shape can
        be broadcast with `self.a` and `self.b`.  For fixed leading
        dimensions, the last dimension represents counts for the corresponding
        Beta distribution in `self.a` and `self.b`. `x` is only legal if
        0 < x < 1.
      name:  Name to give this Op, defaults to "log_prob".

    Returns:
      Log probabilities for each record, shape `[N1,...,Nm]`.
    """
    a = self._a
    b = self._b
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[a, x]):
        x = self._check_x(x)

        unnorm_pdf = (a - 1) * math_ops.log(x) + (
            b - 1) * math_ops.log(1 - x)
        normalization_factor = -(math_ops.lgamma(a) + math_ops.lgamma(b)
                                 - math_ops.lgamma(a + b))
        log_prob = unnorm_pdf + normalization_factor

        return log_prob
Developer: JamesFysh | Project: tensorflow | Lines: 27 | Source: beta.py
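
The result is the Beta log density, with the normalizer expressed through log-gamma:

\[
\log p(x) \;=\; (a - 1)\log x + (b - 1)\log(1 - x) - \log B(a, b), \qquad \log B(a, b) = \log\Gamma(a) + \log\Gamma(b) - \log\Gamma(a + b)
\]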


Example 15: _kl_gamma_gamma

def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.

  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".

  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.))
Developer: aritratony | Project: tensorflow | Lines: 25 | Source: gamma.py
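
In standard notation, with concentrations \(\alpha_i\), rates \(\beta_i\), and digamma function \(\psi\), the returned expression is

\[
\mathrm{KL}(g_0 \,\|\, g_1) \;=\; (\alpha_0 - \alpha_1)\,\psi(\alpha_0) + \log\Gamma(\alpha_1) - \log\Gamma(\alpha_0) + \alpha_1 \log\frac{\beta_0}{\beta_1} + \alpha_0\Big(\frac{\beta_1}{\beta_0} - 1\Big)
\]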


Example 16: __init__

  def __init__(self,
               logits=None,
               p=None,
               dtype=dtypes.int32,
               validate_args=True,
               allow_nan_stats=False,
               name="Bernoulli"):
    """Construct Bernoulli distributions.

    Args:
      logits: An N-D `Tensor` representing the log-odds
        of a positive event. Each entry in the `Tensor` parametrizes
        an independent Bernoulli distribution where the probability of an event
        is sigmoid(logits).
      p: An N-D `Tensor` representing the probability of a positive
          event. Each entry in the `Tensor` parameterizes an independent
          Bernoulli distribution.
      dtype: dtype for samples.
      validate_args: Whether to assert that `0 <= p <= 1`. If `validate_args`
        is `False`, `log_pmf` may return NaNs.
      allow_nan_stats:  Boolean, default False.  If False, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If True, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: A name for this distribution.

    Raises:
      ValueError: If p and logits are passed, or if neither are passed.
    """
    self._allow_nan_stats = allow_nan_stats
    self._name = name
    self._dtype = dtype
    self._validate_args = validate_args
    check_op = check_ops.assert_less_equal
    if p is None and logits is None:
      raise ValueError("Must pass p or logits.")
    elif p is not None and logits is not None:
      raise ValueError("Must pass either p or logits, not both.")
    elif p is None:
      with ops.op_scope([logits], name):
        self._logits = array_ops.identity(logits, name="logits")
      with ops.name_scope(name):
        with ops.name_scope("p"):
          self._p = math_ops.sigmoid(self._logits)
    elif logits is None:
      with ops.name_scope(name):
        with ops.name_scope("p"):
          p = array_ops.identity(p)
          one = constant_op.constant(1., p.dtype)
          zero = constant_op.constant(0., p.dtype)
          self._p = control_flow_ops.with_dependencies(
              [check_op(p, one), check_op(zero, p)] if validate_args else [], p)
        with ops.name_scope("logits"):
          self._logits = math_ops.log(self._p) - math_ops.log(1. - self._p)
    with ops.name_scope(name):
      with ops.name_scope("q"):
        self._q = 1. - self._p
    self._batch_shape = array_ops.shape(self._logits)
    self._event_shape = array_ops.constant([], dtype=dtypes.int32)
Developer: 2020zyc | Project: tensorflow | Lines: 59 | Source: bernoulli.py
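
The logits branch at the end is the standard logit/sigmoid pair, so the two parameterizations are consistent:

\[
\text{logits} = \log\frac{p}{1 - p} = \log p - \log(1 - p), \qquad p = \sigma(\text{logits}) = \frac{1}{1 + e^{-\text{logits}}}
\]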


Example 17: _inverse_log_det_jacobian

  def _inverse_log_det_jacobian(self, y):
    y = self._maybe_assert_valid(y)
    event_dims = self._event_dims_tensor(y)
    return math_ops.reduce_sum(
        math_ops.log(self.concentration1) + math_ops.log(self.concentration0) +
        (self.concentration1 - 1) * math_ops.log(y) +
        (self.concentration0 - 1) * math_ops.log1p(-y**self.concentration1),
        axis=event_dims)
Developer: AndrewTwinz | Project: tensorflow | Lines: 8 | Source: kumaraswamy.py
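
With \(a\) = concentration1 and \(b\) = concentration0, the summand is the Kumaraswamy log density:

\[
\log p(y) \;=\; \log a + \log b + (a - 1)\log y + (b - 1)\log\big(1 - y^{a}\big)
\]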


Example 18: _log_prob

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies(
        [check_ops.assert_positive(x)] if self.validate_args else [], x)
    return (self.alpha * math_ops.log(self.beta)
            - math_ops.lgamma(self.alpha)
            - (self.alpha + 1.0) * math_ops.log(x)
            - self.beta / x)
Developer: kdavis-mozilla | Project: tensorflow | Lines: 8 | Source: inverse_gamma.py
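
This is the inverse-Gamma log density with concentration \(\alpha\) and rate \(\beta\):

\[
\log p(x) \;=\; \alpha \log\beta - \log\Gamma(\alpha) - (\alpha + 1)\log x - \frac{\beta}{x}
\]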


Example 19: _inverse_log_det_jacobian

  def _inverse_log_det_jacobian(self, y):
    y = self._maybe_assert_valid_y(y)
    event_dims = self._event_dims_tensor(y)
    return math_ops.reduce_sum(
        -math_ops.log1p(-y) +
        (1 / self.concentration - 1) * math_ops.log(-math_ops.log1p(-y)) +
        math_ops.log(self.scale / self.concentration),
        axis=event_dims)
Developer: AbhinavJain13 | Project: tensorflow | Lines: 8 | Source: weibull.py
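
With concentration \(k\) and scale \(\lambda\), the summand is the log derivative of the inverse Weibull CDF \(F^{-1}(y) = \lambda\,(-\log(1 - y))^{1/k}\):

\[
\log\left|\frac{dF^{-1}}{dy}\right| \;=\; -\log(1 - y) + \Big(\frac{1}{k} - 1\Big)\log\big(-\log(1 - y)\big) + \log\frac{\lambda}{k}
\]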


Example 20: _log_prob

  def _log_prob(self, x):
    x = self._assert_valid_sample(x)
    log_unnormalized_prob = ((self.a - 1.) * math_ops.log(x) +
                             (self.b - 1.) * math_ops.log(1. - x))
    log_normalization = (math_ops.lgamma(self.a) +
                         math_ops.lgamma(self.b) -
                         math_ops.lgamma(self.a_b_sum))
    return log_unnormalized_prob - log_normalization
Developer: cg31 | Project: tensorflow | Lines: 8 | Source: beta.py



Note: The tensorflow.python.ops.math_ops.log examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before distributing or reusing the code; do not reproduce this article without permission.

