
Python tensorflow.assert_positive Function Code Examples


This article collects typical usage examples of Python's tensorflow.assert_positive function. If you have been wondering what assert_positive does, how to call it, or what real-world uses look like, the curated examples below should help.



A total of 20 code examples of assert_positive are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
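Before working through the examples, here is a minimal sketch of the basic pattern behind `tf.assert_positive` (assuming the TensorFlow 1.x graph-mode API that all of the examples below use): the returned assert op does nothing by itself and must be attached to the computation, typically via `tf.control_dependencies`.

```python
import tensorflow as tf  # TF 1.x graph-mode API, as in the examples below

x = tf.constant([1.0, 2.0, 3.0])
# Gate a computation on the positivity check; the check runs before `y` is computed.
with tf.control_dependencies([tf.assert_positive(x, message="x must be positive")]):
    y = tf.identity(x) * 2.0

with tf.Session() as sess:
    print(sess.run(y))  # [2. 4. 6.]; a non-positive entry in x would raise InvalidArgumentError
```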

Example 1: kl_multivariate_normal

def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
    """Calculate the KL of multivariate normal distributions with
    diagonal covariances.

    Parameters
    ----------
    loc_one : tf.Tensor
        A 0-D tensor, 1-D tensor of length n, or 2-D tensor of shape
        M x n, where each row represents the mean of an n-dimensional
        Gaussian.
    scale_one : tf.Tensor
        A tensor of same shape as ``loc_one``, representing the
        standard deviation.
    loc_two : tf.Tensor, optional
        A tensor of same shape as ``loc_one``, representing the
        mean of another Gaussian.
    scale_two : tf.Tensor, optional
        A tensor of same shape as ``loc_one``, representing the
        standard deviation of another Gaussian.

    Returns
    -------
    tf.Tensor
        For 0-D or 1-D tensor inputs, outputs the 0-D tensor
        ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``
        For 2-D tensor inputs, outputs the 1-D tensor
        ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``

    Raises
    ------
    InvalidArgumentError
        If the location variables have Inf or NaN values, or if the scale
        variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                    tf.verify_tensor_all_finite(loc_two, msg=''),
                    tf.assert_positive(scale_one),
                    tf.assert_positive(scale_two)]
    loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
    scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
    loc_one = tf.cast(loc_one, tf.float32)
    scale_one = tf.cast(scale_one, tf.float32)

    if loc_two == 0.0 and scale_two == 1.0:
        # With default arguments, we can avoid some intermediate computation.
        out = tf.square(scale_one) + tf.square(loc_one) - \
              1.0 - 2.0 * tf.log(scale_one)
    else:
        loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
        scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)
        loc_two = tf.cast(loc_two, tf.float32)
        scale_two = tf.cast(scale_two, tf.float32)
        out = tf.square(scale_one/scale_two) + \
              tf.square((loc_two - loc_one)/scale_two) - \
              1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one)

    if len(out.get_shape()) <= 1: # scalar or vector
        return 0.5 * tf.reduce_sum(out)
    else: # matrix
        return 0.5 * tf.reduce_sum(out, 1)
Author: TalkingData | Project: edward | Lines: 60 | Source file: util.py
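A hypothetical call to the function above, assuming `kl_multivariate_normal` and its module imports are in scope and a TF 1.x session is available:

```python
import tensorflow as tf

loc = tf.constant([[0.0, 0.0], [1.0, -1.0]])    # two 2-dimensional Gaussians, one per row
scale = tf.constant([[1.0, 1.0], [0.5, 2.0]])
kl = kl_multivariate_normal(loc, scale)          # KL against the standard normal N(0, I)

with tf.Session() as sess:
    print(sess.run(kl))  # a length-2 vector, one KL value per row
```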


Example 2: __init__

  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma with `concentration` and `rate` parameters.

    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).

    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.


    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    parameters = dict(locals())
    with tf.name_scope(name, values=[concentration, rate]) as name:
      dtype = dtype_util.common_dtype([concentration, rate],
                                      preferred_dtype=tf.float32)
      concentration = tf.convert_to_tensor(
          concentration, name="concentration", dtype=dtype)
      rate = tf.convert_to_tensor(rate, name="rate", dtype=dtype)
      with tf.control_dependencies([
          tf.assert_positive(
              concentration,
              message="Concentration must be positive."),
          tf.assert_positive(
              rate,
              message="Rate must be positive."),
      ] if validate_args else []):
        self._concentration = tf.identity(concentration, name="concentration")
        self._rate = tf.identity(rate, name="rate")
      tf.assert_same_float_dtype([self._concentration, self._rate])

    super(InverseGamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration, self._rate],
        name=name)
Author: asudomoeva | Project: probability | Lines: 57 | Source file: inverse_gamma.py
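A hedged usage sketch, assuming this class is exposed as `tfp.distributions.InverseGamma` with the `rate` parameterization shown above:

```python
import tensorflow_probability as tfp  # assumed import path for this class

ig = tfp.distributions.InverseGamma(concentration=3.0, rate=2.0, validate_args=True)
mean = ig.mean()     # rate / (concentration - 1) = 1.0, defined only for concentration > 1
samples = ig.sample(4)
# With validate_args=True, a non-positive concentration or rate fails at construction time.
```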


Example 3: rbf

def rbf(X, X2=None, lengthscale=1.0, variance=1.0):
  """Radial basis function kernel, also known as the squared
  exponential or exponentiated quadratic. It is defined as

  $k(x, x') = \sigma^2 \exp\Big(
      -\\frac{1}{2} \sum_{d=1}^D \\frac{1}{\ell_d^2} (x_d - x'_d)^2 \Big)$

  for output variance $\sigma^2$ and lengthscale $\ell^2$.

  The kernel is evaluated over all pairs of rows, `k(X[i, ], X2[j, ])`.
  If `X2` is not specified, then it evaluates over all pairs
  of rows in `X`, `k(X[i, ], X[j, ])`. The output is a matrix
  where each entry (i, j) is the kernel over the ith and jth rows.

  Args:
    X: tf.Tensor.
      N x D matrix of N data points each with D features.
    X2: tf.Tensor.
      N x D matrix of N data points each with D features.
    lengthscale: tf.Tensor.
      Lengthscale parameter, a positive scalar or D-dimensional vector.
    variance: tf.Tensor.
      Output variance parameter, a positive scalar.

  #### Examples

  ```python
  X = tf.random_normal([100, 5])
  K = ed.rbf(X)
  assert K.shape == (100, 100)
  ```
  """
  lengthscale = tf.convert_to_tensor(lengthscale)
  variance = tf.convert_to_tensor(variance)
  dependencies = [tf.assert_positive(lengthscale),
                  tf.assert_positive(variance)]
  lengthscale = control_flow_ops.with_dependencies(dependencies, lengthscale)
  variance = control_flow_ops.with_dependencies(dependencies, variance)

  X = tf.convert_to_tensor(X)
  X = X / lengthscale
  Xs = tf.reduce_sum(tf.square(X), 1)
  if X2 is None:
    X2 = X
    X2s = Xs
  else:
    X2 = tf.convert_to_tensor(X2)
    X2 = X2 / lengthscale
    X2s = tf.reduce_sum(tf.square(X2), 1)

  square = tf.reshape(Xs, [-1, 1]) + tf.reshape(X2s, [1, -1]) - \
      2 * tf.matmul(X, X2, transpose_b=True)
  output = variance * tf.exp(-square / 2)
  return output
Author: JoyceYa | Project: edward | Lines: 54 | Source file: tensorflow.py
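A short follow-up sketch, assuming `rbf` above is in scope (Edward exposes it as `ed.rbf`, as in the docstring example); it evaluates the cross-kernel between two point sets with per-dimension lengthscales:

```python
import tensorflow as tf

X = tf.random_normal([100, 5])
X2 = tf.random_normal([50, 5])
# A 5-dimensional lengthscale broadcasts against the feature axis.
K = rbf(X, X2, lengthscale=tf.ones([5]), variance=2.0)  # shape (100, 50)
```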


Example 4: _validate

 def _validate(self):
   vops = [tf.assert_positive(self._scale),
           tf.assert_positive(self._high - self._low),
           tf.verify_tensor_all_finite(self._high,
                                       "Upper bound not finite"),
           tf.verify_tensor_all_finite(self._low,
                                       "Lower bound not finite"),
           tf.verify_tensor_all_finite(self._loc,
                                       "Loc not finite"),
           tf.verify_tensor_all_finite(self._scale,
                                       "Scale not finite"),
          ]
   return tf.group(*vops, name="ValidationOps")
Author: lewisKit | Project: probability | Lines: 13 | Source file: truncated_normal.py


Example 5: kl_multivariate_normal

def kl_multivariate_normal(loc_one, scale_one, loc_two=0.0, scale_two=1.0):
    """Calculate the KL of multivariate normal distributions with
    diagonal covariances.

    Parameters
    ----------
    loc_one : tf.Tensor
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the mean of an n-dimensional Gaussian
    scale_one : tf.Tensor
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the standard deviation of an n-dimensional Gaussian
    loc_two : tf.Tensor, optional
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the mean of an n-dimensional Gaussian
    scale_two : tf.Tensor, optional
        n-dimensional vector, or M x n-dimensional matrix where each
        row represents the standard deviation of an n-dimensional Gaussian

    Returns
    -------
    tf.Tensor
        for scalar or vector inputs, outputs the scalar
        ``KL( N(z; loc_one, scale_one) || N(z; loc_two, scale_two) )``
        for matrix inputs, outputs the vector
        ``[KL( N(z; loc_one[m,:], scale_one[m,:]) || N(z; loc_two[m,:], scale_two[m,:]) )]_{m=1}^M``

    Raises
    ------
    InvalidArgumentError
        If the location variables have Inf or NaN values, or if the scale
        variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(loc_one, msg=''),
                  tf.verify_tensor_all_finite(loc_two, msg=''),
                  tf.assert_positive(scale_one),
                  tf.assert_positive(scale_two)]
    loc_one = control_flow_ops.with_dependencies(dependencies, loc_one)
    loc_two = control_flow_ops.with_dependencies(dependencies, loc_two)
    scale_one = control_flow_ops.with_dependencies(dependencies, scale_one)
    scale_two = control_flow_ops.with_dependencies(dependencies, scale_two)

    if loc_two == 0.0 and scale_two == 1.0:
        return 0.5 * tf.reduce_sum(
            tf.square(scale_one) + tf.square(loc_one) - \
            1.0 - 2.0 * tf.log(scale_one))
    else:
        return 0.5 * tf.reduce_sum(
            tf.square(scale_one/scale_two) + \
            tf.square((loc_two - loc_one)/scale_two) - \
            1.0 + 2.0 * tf.log(scale_two) - 2.0 * tf.log(scale_one), 1)
Author: leezqcst | Project: edward | Lines: 51 | Source file: util.py


Example 6: ptb_producer

def ptb_producer(raw_data, batch_size, num_steps, name=None):
    
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data, 
                                        dtype=tf.int32, name="raw_data")
        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0: batch_len*batch_size],
                          [batch_size, batch_len])
        epoch_size = (batch_len-1) // num_steps
        assertion = tf.assert_positive(
                epoch_size,
                message="batch size too large")
        
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")
        
        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i*num_steps],
                             [batch_size, (i+1)*num_steps])
        
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i*num_steps+1],
                             [batch_size, (i+1)*num_steps+1])
        y.set_shape([batch_size, num_steps])
        return x, y
Author: lacozhang | Project: torchcode | Lines: 26 | Source file: embed.py
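A hedged sketch of consuming the producer, assuming TF 1.x queue runners (which `tf.train.range_input_producer` relies on):

```python
import tensorflow as tf

raw = list(range(1000))  # toy token ids standing in for the ptb_raw_data output
x, y = ptb_producer(raw, batch_size=4, num_steps=5)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    bx, by = sess.run([x, y])  # each shaped [4, 5]; y is x shifted right by one step
    coord.request_stop()
    coord.join(threads)
```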


Example 7: _maybe_assert_valid_x

 def _maybe_assert_valid_x(self, x):
   if not self.validate_args:
     return x
   is_valid = tf.assert_positive(
       x[..., 1:] - x[..., :-1],
       message="Forward transformation input must be strictly increasing.")
   return control_flow_ops.with_dependencies([is_valid], x)
Author: asudomoeva | Project: probability | Lines: 7 | Source file: ordered.py


Example 8: enqueuer

def enqueuer(raw_data, batch_size, num_steps, name=None):
    """Iterate on the raw PTB data.
    This chunks up raw_data into batches of examples and returns Tensors that
    are drawn from these batches.
    Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).
    Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.
    Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
    """
    with tf.name_scope(name, "InputEnqueuer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data, name = "raw_data", dtype = tf.int32)

        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0 : batch_size * batch_len],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(epoch_size, message = "epoch_size == 0, decrease batch_size or num_steps")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name = "epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue() # output queue index from the queue object (relies on tf.train.Supervisor)
        x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps]) # slice inputs
        y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps]) # targets, shifted right by one
        return x, y
Author: alex-ten | Project: PDP | Lines: 32 | Source file: reader.py


Example 9: test_raises_when_zero

 def test_raises_when_zero(self):
   with self.test_session():
     meechum = tf.constant([0], name="meechum")
     with tf.control_dependencies([tf.assert_positive(meechum)]):
       out = tf.identity(meechum)
     with self.assertRaisesOpError("meechum"):
       out.eval()
Author: 3kwa | Project: tensorflow | Lines: 7 | Source file: check_ops_test.py


Example 10: check_3d_image

def check_3d_image(image, require_static=True):
  """Assert that we are working with properly shaped image.

  Args:
    image: 3-D Tensor of shape [height, width, channels]
    require_static: If `True`, requires that all dimensions of `image` are
      known and non-zero.

  Raises:
    ValueError: if `image.shape` is not a 3-vector.

  Returns:
    An empty list, if `image` has fully defined dimensions. Otherwise, a list
    containing an assert op is returned.
  """
  try:
    image_shape = image.get_shape().with_rank(3)
  except ValueError:
    raise ValueError("'image' must be three-dimensional.")
  if require_static and not image_shape.is_fully_defined():
    raise ValueError("'image' must be fully defined.")
  if any(x == 0 for x in image_shape):
    raise ValueError("all dims of 'image.shape' must be > 0: %s" %
                     image_shape)
  if not image_shape.is_fully_defined():
    return [tf.assert_positive(tf.shape(image),
                                      ["all dims of 'image.shape' "
                                       "must be > 0."])]
  else:
    return []
Author: tigercut | Project: MobileNet | Lines: 30 | Source file: mobilenetdet_preprocessing.py
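A hedged usage sketch, assuming `check_3d_image` above is in scope; the returned list of assert ops is meant to gate later image ops:

```python
import tensorflow as tf

image = tf.placeholder(tf.float32, shape=[None, None, 3])  # rank is known, sizes are dynamic
asserts = check_3d_image(image, require_static=False)
with tf.control_dependencies(asserts):
    image = tf.identity(image)  # a zero-sized dimension now fails at run time
```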


Example 11: _maybe_assert_valid_sample

 def _maybe_assert_valid_sample(self, x):
   tf.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
   if not self.validate_args:
     return x
   return control_flow_ops.with_dependencies([
       tf.assert_positive(x),
   ], x)
Author: asudomoeva | Project: probability | Lines: 7 | Source file: inverse_gamma.py


Example 12: reduce_mean

def reduce_mean(seq_batch, allow_empty=False):
    """Compute the mean of each sequence in a SequenceBatch.

    Args:
        seq_batch (SequenceBatch): a SequenceBatch with the following attributes:
            values (Tensor): a Tensor of shape (batch_size, seq_length, :, ..., :)
            mask (Tensor): if the mask values are arbitrary floats (rather than binary), the mean will be
            a weighted average.
        allow_empty (bool): allow computing the average of an empty sequence. In this case, we assume 0/0 == 0, rather
            than NaN. Default is False, causing an error to be thrown.

    Returns:
        Tensor: of shape (batch_size, :, ..., :)
    """
    values, mask = seq_batch.values, seq_batch.mask
    # compute weights for the average
    sums = tf.reduce_sum(mask, 1, keep_dims=True)  # (batch_size, 1)

    if allow_empty:
        asserts = []  # no assertion
        sums = tf.select(tf.equal(sums, 0), tf.ones(tf.shape(sums)), sums)  # replace 0's with 1's
    else:
        asserts = [tf.assert_positive(sums)]  # throw error if 0's exist

    with tf.control_dependencies(asserts):
        weights = mask / sums  # (batch_size, seq_length)
    return weighted_sum(seq_batch, weights)
Author: siddk | Project: lang2program | Lines: 27 | Source file: seq_batch.py


Example 13: __init__

  def __init__(self,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="HalfNormal"):
    """Construct HalfNormals with scale `scale`.

    Args:
      scale: Floating point tensor; the scales of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = dict(locals())
    with tf.name_scope(name, values=[scale]) as name:
      with tf.control_dependencies([tf.assert_positive(scale)]
                                   if validate_args else []):
        self._scale = tf.identity(scale, name="scale")
    super(HalfNormal, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=tf.distributions.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._scale],
        name=name)
Author: lewisKit | Project: probability | Lines: 33 | Source file: half_normal.py
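A hedged usage sketch, assuming the class is exposed as `tfp.distributions.HalfNormal`:

```python
import tensorflow_probability as tfp  # assumed import path

dist = tfp.distributions.HalfNormal(scale=[1.0, 2.0], validate_args=True)
samples = dist.sample(5)            # shape (5, 2)
log_probs = dist.log_prob(samples)
# With validate_args=True, a non-positive scale trips tf.assert_positive at construction time.
```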


Example 14: test_raises_when_negative

 def test_raises_when_negative(self):
   with self.test_session():
     freddie = tf.constant([-1, -2], name="freddie")
     with tf.control_dependencies([tf.assert_positive(freddie)]):
       out = tf.identity(freddie)
     with self.assertRaisesOpError("freddie"):
       out.eval()
Author: 3kwa | Project: tensorflow | Lines: 7 | Source file: check_ops_test.py


Example 15: __init__

  def __init__(self,
               loc=0.,
               scale=1.,
               validate_args=False,
               name="gumbel"):
    """Instantiates the `Gumbel` bijector.

    Args:
      loc: Float-like `Tensor` that is the same dtype and is
        broadcastable with `scale`.
        This is `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
      scale: Positive Float-like `Tensor` that is the same dtype and is
        broadcastable with `loc`.
        This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    with self._name_scope("init", values=[loc, scale]):
      self._loc = tf.convert_to_tensor(loc, name="loc")
      self._scale = tf.convert_to_tensor(scale, name="scale")
      tf.assert_same_float_dtype([self._loc, self._scale])
      if validate_args:
        self._scale = control_flow_ops.with_dependencies([
            tf.assert_positive(
                self._scale, message="Argument scale was not positive")
        ], self._scale)

    super(Gumbel, self).__init__(
        validate_args=validate_args,
        forward_min_event_ndims=0,
        name=name)
Author: lewisKit | Project: probability | Lines: 35 | Source file: gumbel.py


Example 16: calculate_reshape

def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Author: lewisKit | Project: probability | Lines: 29 | Source file: batch_reshape.py


Example 17: __init__

  def __init__(self,
               total_count,
               logits=None,
               probs=None,
               validate_args=False,
               allow_nan_stats=True,
               name="NegativeBinomial"):
    """Construct NegativeBinomial distributions.

    Args:
      total_count: Non-negative floating-point `Tensor` with shape
        broadcastable to `[B1,..., Bb]` with `b >= 0` and the same dtype as
        `probs` or `logits`. Defines this as a batch of `N1 x ... x Nm`
        different Negative Binomial distributions. In practice, this represents
        the number of negative Bernoulli trials to stop at (the `total_count`
        of failures), but this is still a valid distribution when
        `total_count` is a non-integer.
      logits: Floating-point `Tensor` with shape broadcastable to
        `[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
        Each entry represents logits for the probability of success for
        independent Negative Binomial distributions and must be in the open
        interval `(-inf, inf)`. Only one of `logits` or `probs` should be
        specified.
      probs: Positive floating-point `Tensor` with shape broadcastable to
        `[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
        Each entry represents the probability of success for independent
        Negative Binomial distributions and must be in the open interval
        `(0, 1)`. Only one of `logits` or `probs` should be specified.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """

    parameters = dict(locals())
    with tf.name_scope(name, values=[total_count, logits, probs]) as name:
      dtype = dtype_util.common_dtype([total_count, logits, probs],
                                      preferred_dtype=tf.float32)
      self._logits, self._probs = distribution_util.get_logits_and_probs(
          logits, probs, validate_args=validate_args, name=name, dtype=dtype)
      total_count = tf.convert_to_tensor(
          total_count, name="total_count", dtype=dtype)
      with tf.control_dependencies([tf.assert_positive(total_count)]
                                   if validate_args else []):
        self._total_count = tf.identity(total_count, name="total_count")

    super(NegativeBinomial, self).__init__(
        dtype=self._probs.dtype,
        reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._total_count, self._probs, self._logits],
        name=name)
Author: asudomoeva | Project: probability | Lines: 59 | Source file: negative_binomial.py
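A hedged usage sketch, assuming the class is exposed as `tfp.distributions.NegativeBinomial`:

```python
import tensorflow_probability as tfp  # assumed import path

nb = tfp.distributions.NegativeBinomial(total_count=5.0, probs=0.3, validate_args=True)
mean = nb.mean()          # total_count * probs / (1 - probs) ≈ 2.14
variance = nb.variance()
# With validate_args=True, a non-positive total_count fails the tf.assert_positive check above.
```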


Example 18: _maybe_assert_valid_concentration

 def _maybe_assert_valid_concentration(self, concentration, validate_args):
   """Checks the validity of a concentration parameter."""
   if not validate_args:
     return concentration
   return control_flow_ops.with_dependencies([
       tf.assert_positive(
           concentration, message="Concentration parameter must be positive."),
   ], concentration)
Author: lewisKit | Project: probability | Lines: 8 | Source file: kumaraswamy.py


Example 19: multivariate_rbf

def multivariate_rbf(x, y=0.0, sigma=1.0, l=1.0):
    """Squared-exponential kernel

    .. math:: k(x, y) = \sigma^2 \exp{ -1/(2l^2) \sum_i (x_i - y_i)^2 }

    Parameters
    ----------
    x : tf.Tensor
        A n-D tensor.
    y : tf.Tensor, optional
        A tensor of same shape as ``x``.
    sigma : tf.Tensor, optional
        A 0-D tensor, representing the standard deviation of radial
        basis function.
    l : tf.Tensor, optional
        A 0-D tensor, representing the lengthscale of radial basis
        function.

    Returns
    -------
    tf.Tensor
        A tensor of one less dimension than the input.

    Raises
    ------
    InvalidArgumentError
        If the mean variables have Inf or NaN values, or if the scale
        and length variables are not positive.
    """
    dependencies = [tf.verify_tensor_all_finite(x, msg=''),
                    tf.verify_tensor_all_finite(y, msg=''),
                    tf.assert_positive(sigma),
                    tf.assert_positive(l)]
    x = control_flow_ops.with_dependencies(dependencies, x)
    y = control_flow_ops.with_dependencies(dependencies, y)
    sigma = control_flow_ops.with_dependencies(dependencies, sigma)
    l = control_flow_ops.with_dependencies(dependencies, l)
    x = tf.cast(x, dtype=tf.float32)
    y = tf.cast(y, dtype=tf.float32)
    sigma = tf.cast(sigma, dtype=tf.float32)
    l = tf.cast(l, dtype=tf.float32)

    return tf.pow(sigma, 2.0) * \
           tf.exp(-1.0/(2.0*tf.pow(l, 2.0)) * \
           tf.reduce_sum(tf.pow(x - y , 2.0)))
Author: TalkingData | Project: edward | Lines: 45 | Source file: util.py
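A hypothetical call to the kernel above, assuming `multivariate_rbf` and its module imports are in scope:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([1.5, 1.0, 2.0])
k = multivariate_rbf(x, y, sigma=1.0, l=2.0)  # sigma**2 * exp(-sum((x - y)**2) / (2 * l**2))

with tf.Session() as sess:
    print(sess.run(k))  # a single scalar kernel value
```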


Example 20: __init__

  def __init__(self,
               concentration,
               scale=1.,
               validate_args=False,
               allow_nan_stats=True,
               name="Pareto"):
    """Construct Pareto distribution with `concentration` and `scale`.

    Args:
      concentration: Floating point tensor. Must contain only positive values.
      scale: Floating point tensor, equivalent to `mode`. `scale` also
        restricts the domain of this distribution to be in `[scale, inf)`.
        Must contain only positive values. Default value: `1`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs. Default value: `False` (i.e. do not validate args).
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
        Default value: `True`.
      name: Python `str` name prefixed to Ops created by this class.
        Default value: 'Pareto'.
    """
    parameters = dict(locals())
    with tf.name_scope(name, values=[concentration, scale]):
      dtype = dtype_util.common_dtype([concentration, scale], tf.float32)
      self._concentration = tf.convert_to_tensor(
          concentration, name="concentration", dtype=dtype)
      self._scale = tf.convert_to_tensor(scale, name="scale", dtype=dtype)
      with tf.control_dependencies([
          tf.assert_positive(self._concentration),
          tf.assert_positive(self._scale)] if validate_args else []):
        self._concentration = tf.identity(
            self._concentration, name="concentration")
        self._scale = tf.identity(self._scale, name="scale")
    super(Pareto, self).__init__(
        dtype=self._concentration.dtype,
        reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._concentration, self._scale],
        name=name)
Author: asudomoeva | Project: probability | Lines: 45 | Source file: pareto.py



Note: The tensorflow.assert_positive examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.

