
Python math_ops.squared_difference Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.squared_difference, drawn from open-source projects. If you are wondering what squared_difference does and how to call it, the curated examples below should help.



The following 20 code examples of squared_difference are presented below, ordered by popularity.
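Before the examples, here is a minimal usage sketch (hypothetical values): squared_difference(x, y) computes the element-wise (x - y)^2 with standard broadcasting. The sketch uses the public alias tf.math.squared_difference; the snippets below call the internal module directly and assume imports such as from tensorflow.python.ops import math_ops.

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
y = tf.constant([0., 1., 2.])

# Element-wise (x - y)^2; y is broadcast across the rows of x.
z = tf.math.squared_difference(x, y)
# Same result as tf.square(x - y):
# [[ 1.,  1.,  1.],
#  [16., 16., 16.]]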

Example 1: _r2

def _r2(probabilities, targets, weights=None):
  targets = math_ops.cast(targets, dtypes.float32)
  y_mean = math_ops.reduce_mean(targets, 0)
  squares_total = math_ops.reduce_sum(
      math_ops.squared_difference(targets, y_mean), 0)
  squares_residuals = math_ops.reduce_sum(
      math_ops.squared_difference(targets, probabilities), 0)
  score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
  return metrics.mean(score, weights=weights)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 9, Source: eval_metrics.py
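For a single output dimension, the score above reduces to the familiar coefficient of determination, R^2 = 1 - SS_res / SS_tot. A NumPy sketch with hypothetical data:

import numpy as np

targets = np.array([1., 2., 3., 4.], dtype=np.float32)
predictions = np.array([1.1, 1.9, 3.2, 3.8], dtype=np.float32)

ss_total = np.sum((targets - targets.mean()) ** 2)  # squares_total
ss_residual = np.sum((targets - predictions) ** 2)  # squares_residuals
r2 = 1.0 - ss_residual / ss_total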


Example 2: testSquaredDifference

  def testSquaredDifference(self):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    y = np.array([-3, -2, -1], dtype=np.int32)
    z = (x - y) * (x - y)
    with self.test_session():
      z_tf = math_ops.squared_difference(x, y).eval()
      self.assertAllClose(z, z_tf)
Developer: yaroslavvb, Project: imperative, Lines: 7, Source: math_ops_test.py


Example 3: mean_squared_error

def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.cast(predictions, dtypes.float32)
    labels = math_ops.cast(labels, dtypes.float32)
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(losses, weights, scope=scope)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 32, Source: loss_ops.py
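A hedged sketch of the core computation with hypothetical tensors; for the default scalar weight of 1.0, compute_weighted_loss amounts to a plain mean over the per-element squared errors:

import tensorflow as tf

predictions = tf.constant([[0.5, 1.5], [2.0, 3.0]])
labels = tf.constant([[1.0, 1.0], [2.0, 4.0]])

# Per-element squared errors, as computed inside mean_squared_error.
per_element = tf.math.squared_difference(predictions, labels)
loss = tf.reduce_mean(per_element)  # scalar weight 1.0 -> plain mean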


Example 4: exact_gaussian_kernel

def exact_gaussian_kernel(x, y, stddev):
  r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev.

  The Gaussian kernel for vectors u, v is defined as follows:
       K(u, v) = exp(-||u-v||^2 / (2* stddev^2))
  where the norm is the l2-norm. x, y can be either vectors or matrices. If they
  are vectors, they must have the same dimension. If they are matrices, they
  must have the same number of columns. In the latter case, the method returns
  (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and
  v is a row from y.

  Args:
    x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
    stddev: The width of the Gaussian kernel.

  Returns:
    A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix
      of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for
      all (u,v) pairs where u, v are rows from x and y respectively.

  Raises:
    ValueError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  diff_squared_l2_norm = math_ops.reduce_sum(
      math_ops.squared_difference(x_aligned, y_aligned), 2)
  return math_ops.exp(-diff_squared_l2_norm / (2 * stddev * stddev))
Developer: adit-chandra, Project: tensorflow, Lines: 28, Source: kernelized_utils.py
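The same kernel can be cross-checked with a NumPy sketch (the gaussian_kernel_np helper below is hypothetical; it broadcasts pairwise differences instead of calling _align_matrices):

import numpy as np

def gaussian_kernel_np(x, y, stddev):
  # x: [m, dim], y: [n, dim] -> kernel matrix of shape [m, n].
  diff = x[:, None, :] - y[None, :, :]       # [m, n, dim]
  sq_dist = np.sum(diff ** 2, axis=2)        # ||u - v||^2 for every pair
  return np.exp(-sq_dist / (2.0 * stddev ** 2))

x = np.random.randn(3, 4)
y = np.random.randn(5, 4)
k = gaussian_kernel_np(x, y, stddev=1.0)     # shape (3, 5)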


Example 5: testSampleConsistentStats

  def testSampleConsistentStats(self):
    loc = np.float32([[-1., 1], [1, -1]])
    scale = np.float32([1., 0.5])
    n_samp = 1e4
    with self.test_session() as sess:
      ind = independent_lib.Independent(
          distribution=mvn_diag_lib.MultivariateNormalDiag(
              loc=loc,
              scale_identity_multiplier=scale),
          reduce_batch_ndims=1)

      x = ind.sample(int(n_samp), seed=42)
      sample_mean = math_ops.reduce_mean(x, axis=0)
      sample_var = math_ops.reduce_mean(
          math_ops.squared_difference(x, sample_mean), axis=0)
      sample_std = math_ops.sqrt(sample_var)
      sample_entropy = -math_ops.reduce_mean(ind.log_prob(x), axis=0)

      [
          sample_mean_, sample_var_, sample_std_, sample_entropy_,
          actual_mean_, actual_var_, actual_std_, actual_entropy_,
          actual_mode_,
      ] = sess.run([
          sample_mean, sample_var, sample_std, sample_entropy,
          ind.mean(), ind.variance(), ind.stddev(), ind.entropy(), ind.mode(),
      ])

      self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)
      self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)
      self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)
      self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)
      self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)
Developer: benoitsteiner, Project: tensorflow-opencl, Lines: 32, Source: independent_test.py


Example 6: _mean_squared_loss

def _mean_squared_loss(logits, target):
  # To prevent broadcasting inside "-".
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, axis=1)

  logits.get_shape().assert_is_compatible_with(target.get_shape())
  return math_ops.squared_difference(logits, math_ops.to_float(target))
Developer: jackd, Project: tensorflow, Lines: 7, Source: target_column.py


Example 7: contrastive_loss

def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
                     margin=1.0):
  """Computes the contrastive loss.

  This loss encourages the embeddings of samples with the same label to be
  close to each other, and the embeddings of samples with different labels
  to be at least `margin` apart.
  See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

  Args:
    labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
      binary labels indicating positive vs negative pair.
    embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
      images. Embeddings should be l2 normalized.
    embeddings_positive: 2-D float `Tensor` of embedding vectors for the
      positive images. Embeddings should be l2 normalized.
    margin: margin term in the loss definition.

  Returns:
    contrastive_loss: tf.float32 scalar.
  """
  # Get per pair distances
  distances = math_ops.sqrt(
      math_ops.reduce_sum(
          math_ops.squared_difference(embeddings_anchor, embeddings_positive),
          1))

  # Add contrastive loss for the siamese network.
  #   label here is {0,1} for neg, pos.
  return math_ops.reduce_mean(
      math_ops.to_float(labels) * math_ops.square(distances) +
      (1. - math_ops.to_float(labels)) *
      math_ops.square(math_ops.maximum(margin - distances, 0.)),
      name='contrastive_loss')
Developer: jackd, Project: tensorflow, Lines: 34, Source: metric_loss_ops.py
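The two loss terms can be written out directly in NumPy (hypothetical labels and distances):

import numpy as np

margin = 1.0
labels = np.array([1.0, 0.0])       # 1 = positive pair, 0 = negative pair
distances = np.array([0.3, 0.4])    # Euclidean distances between embeddings

# Positive pairs are pulled together; negative pairs are pushed apart
# until they are at least `margin` away.
loss = np.mean(
    labels * distances ** 2 +
    (1.0 - labels) * np.maximum(margin - distances, 0.0) ** 2)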


Example 8: testSquaredDifference

  def testSquaredDifference(self):
    for dtype in [np.int32, np.float16]:
      x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
      y = np.array([-3, -2, -1], dtype=dtype)
      z = (x - y) * (x - y)
      with test_util.device(use_gpu=True):
        z_tf = self.evaluate(math_ops.squared_difference(x, y))
        self.assertAllClose(z, z_tf)
Developer: LongJun123456, Project: tensorflow, Lines: 8, Source: math_ops_test.py


Example 9: testSquaredDifference

  def testSquaredDifference(self):
    for dtype in [np.int32, np.float16]:
      x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
      y = np.array([-3, -2, -1], dtype=dtype)
      z = (x - y) * (x - y)
      with self.test_session(use_gpu=True):
        z_tf = math_ops.squared_difference(x, y).eval()
        self.assertAllClose(z, z_tf)
Developer: Jackhuang945, Project: tensorflow, Lines: 8, Source: math_ops_test.py


Example 10: moments

def moments(x, axes, name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`.  If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):
    * for so-called "global normalization", used with convolutional filters with
      shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
    * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: array of ints.  Axes along which to compute mean and
      variance.
    keep_dims: produce moments with the same dimensionality as the input.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.op_scope([x, axes], name, "moments"):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      # The shape is known in the relevant axes, so we can statically
      # compute the divisor.
      divisor = 1.0
      for d in set(axes):
        divisor *= x.get_shape()[d].value
      divisor = constant_op.constant(1.0 / divisor, x.dtype, name="divisor")
    else:
      divisor = constant_op.constant(1.0, dtype=x.dtype)
      x_dynamic_shape = array_ops.shape(x)
      for d in set(axes):
        divisor *= math_ops.cast(x_dynamic_shape[d], x.dtype)
      divisor = math_ops.inv(divisor, name="divisor")
    constant_axes = constant_op.constant(axes, name="axes")
    # Note: We do not use Mean here because it is very slow on GPU.
    mean = math_ops.mul(
        math_ops.reduce_sum(x,
                            constant_axes,
                            keep_dims=True),
        divisor,
        name="mean")
    var = math_ops.mul(
        math_ops.reduce_sum(
            math_ops.squared_difference(x, mean),
            constant_axes,
            keep_dims=keep_dims),
        divisor,
        name="variance")
    if keep_dims:
      return mean, var
    else:
      return array_ops.squeeze(mean, squeeze_dims=axes), var
Developer: 13331151, Project: tensorflow, Lines: 58, Source: nn.py
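A quick NumPy cross-check of what moments(x, axes=[0]) returns (hypothetical input; note the variance is the biased, population form):

import numpy as np

x = np.random.randn(8, 3).astype(np.float32)
mean = x.mean(axis=0)
var = np.mean((x - mean) ** 2, axis=0)  # matches the reduce_sum * divisor form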


Example 11: testComplexSquaredDifference

  def testComplexSquaredDifference(self):
    for dtype in [np.complex64, np.complex128]:
      x = np.array([[1 + 3j, 2 + 2j, 3 + 1j], [4 - 1j, 5 - 2j, 6 - 3j]],
                   dtype=dtype)
      y = np.array([-3 + 1j, -2 + 2j, -1 + 3j], dtype=dtype)
      z = np.conj(x - y) * (x - y)
      with test_util.device(use_gpu=False):
        z_tf = self.evaluate(math_ops.squared_difference(x, y))
        self.assertAllClose(z, z_tf)
Developer: Wajih-O, Project: tensorflow, Lines: 9, Source: math_ops_test.py


Example 12: sufficient_statistics

def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:
    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  with ops.op_scope([x, axes, shift], name, "sufficient_statistics"):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if x_shape.is_fully_defined():
      counts = 1
      m_shape = []
      for d in xrange(x_shape.ndims):
        dim = x_shape[d].value
        if d in set(axes):
          counts *= dim
          dim = 1
        m_shape.append(dim)
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_shape = array_ops.shape(x)
      select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape),
                                               True, False)
      m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape),
                                x_shape)
      counts = math_ops.cast(
          math_ops.reduce_prod(x_shape / m_shape),
          x.dtype,
          name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.sub(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
Developer: 285219011, Project: hello-world, Lines: 56, Source: nn.py
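The mean and variance are recovered from the four returned values as below (a sketch of the downstream computation; in TensorFlow this step is performed by the companion normalize_moments function). When shift is None, treat it as 0:

# counts, m_ss, v_ss, shift = sufficient_statistics(x, axes)
shifted_mean = m_ss / counts                   # E[x - shift]
mean = shifted_mean + shift                    # E[x]
variance = v_ss / counts - shifted_mean ** 2   # E[(x-shift)^2] - (E[x-shift])^2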


Example 13: _reduce_variance

def _reduce_variance(x, axis=None, biased=True, keepdims=False):
  with ops.name_scope("reduce_variance"):
    x = ops.convert_to_tensor(x, name="x")
    mean = math_ops.reduce_mean(x, axis=axis, keepdims=True)
    biased_var = math_ops.reduce_mean(
        math_ops.squared_difference(x, mean), axis=axis, keepdims=keepdims)
    if biased:
      return biased_var
    n = _axis_size(x, axis)
    return (n / (n - 1.)) * biased_var
Developer: ChengYuXiang, Project: tensorflow, Lines: 10, Source: mcmc_diagnostics_impl.py
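The biased/unbiased switch mirrors NumPy's ddof argument (a hypothetical check):

import numpy as np

x = np.random.randn(100)
biased = np.var(x, ddof=0)    # divides by n, like biased=True
unbiased = np.var(x, ddof=1)  # divides by n - 1
assert np.isclose(unbiased, biased * len(x) / (len(x) - 1))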


Example 14: mean_only_frechet_classifier_distance_from_activations

def mean_only_frechet_classifier_distance_from_activations(
    real_activations, generated_activations):
  """Classifier distance for evaluating a generative model from activations.

  Given two Gaussian distributions with means m and m_w and covariance matrices
  C and C_w, this function calculates

                                |m - m_w|^2

  which captures how different the distributions of real images and generated
  images (or more accurately, their visual features) are. Note that unlike the
  Inception score, this is a true distance and utilizes information about real
  world images.

  Note that when computed using sample means and sample covariance matrices,
  Frechet distance is biased. It is more biased for small sample sizes. (e.g.
  even if the two distributions are the same, for a small sample size, the
  expected Frechet distance is large). It is important to use the same
  sample size to compute the Frechet classifier distance when comparing two
  generative models.

  In this variant, we only compute the difference between the means of the
  fitted Gaussians. The computation leads to O(n) vs. O(n^2) memory usage, yet
  still retains much of the same information as FID.

  Args:
    real_activations: 2D array of activations of real images of size
      [num_images, num_dims] to use to compute Frechet Inception distance.
    generated_activations: 2D array of activations of generated images of size
      [num_images, num_dims] to use to compute Frechet Inception distance.

  Returns:
    The mean-only Frechet Inception distance. A floating-point scalar of the
    same type as the output of the activations.
  """
  real_activations.shape.assert_has_rank(2)
  generated_activations.shape.assert_has_rank(2)

  activations_dtype = real_activations.dtype
  if activations_dtype != dtypes.float64:
    real_activations = math_ops.cast(real_activations, dtypes.float64)
    generated_activations = math_ops.cast(generated_activations, dtypes.float64)

  # Compute means of activations.
  m = math_ops.reduce_mean(real_activations, 0)
  m_w = math_ops.reduce_mean(generated_activations, 0)

  # Next the distance between means.
  mean = math_ops.reduce_sum(
      math_ops.squared_difference(m, m_w))  # Equivalent to L2 but more stable.
  mofid = mean
  if activations_dtype != dtypes.float64:
    mofid = math_ops.cast(mofid, activations_dtype)

  return mofid
Developer: jackd, Project: tensorflow, Lines: 55, Source: classifier_metrics_impl.py
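The distance itself is just the squared L2 norm between the two activation means, which a NumPy sketch makes explicit (hypothetical activations):

import numpy as np

real = np.random.randn(1000, 64)
generated = np.random.randn(1000, 64)

m = real.mean(axis=0)
m_w = generated.mean(axis=0)
mean_only_fid = np.sum((m - m_w) ** 2)  # |m - m_w|^2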


Example 15: mean_squared_error

def mean_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or `predictions`
      is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Developer: bunbutter, Project: tensorflow, Lines: 50, Source: losses_impl.py


Example 16: _variance

  def _variance(self):
    with ops.control_dependencies(self._runtime_assertions):
      # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
      probs = self._pad_mix_dims(
          self.mixture_distribution.probs)                   # [B, k, [1]*e]
      mean_cond_var = math_ops.reduce_sum(
          probs * self.components_distribution.variance(),
          axis=-1 - self._event_ndims)                       # [B, E]
      var_cond_mean = math_ops.reduce_sum(
          probs * math_ops.squared_difference(
              self.components_distribution.mean(),
              self._pad_sample_dims(self._mean())),
          axis=-1 - self._event_ndims)                       # [B, E]
      return mean_cond_var + var_cond_mean                   # [B, E]
Developer: DjangoPeng, Project: tensorflow, Lines: 14, Source: mixture_same_family.py
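The law of total variance used above is easy to verify for a two-component 1-D mixture (hypothetical parameters):

import numpy as np

probs = np.array([0.3, 0.7])       # mixture weights
means = np.array([-1.0, 2.0])      # component means
variances = np.array([0.5, 1.5])   # component variances

mixture_mean = np.sum(probs * means)
mean_cond_var = np.sum(probs * variances)                    # E[Var(Y|X)]
var_cond_mean = np.sum(probs * (means - mixture_mean) ** 2)  # Var(E[Y|X])
total_var = mean_cond_var + var_cond_mean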


Example 17: per_example_squared_loss

def per_example_squared_loss(labels, weights, predictions):
  """Squared loss given labels, example weights and predictions.

  Args:
    labels: Rank 2 (N, D) tensor of per-example labels.
    weights: Rank 2 (N, 1) tensor of per-example weights.
    predictions: Rank 2 (N, D) tensor of per-example predictions.

  Returns:
    loss: A Rank 2 (N, 1) tensor of per-example squared loss.
    update_op: An update operation to update the loss's internal state.
  """
  unweighted_loss = math_ops.reduce_sum(
      math_ops.squared_difference(predictions, labels), 1, keepdims=True)

  return unweighted_loss * weights, control_flow_ops.no_op()
Developer: AndreasGocht, Project: tensorflow, Lines: 16, Source: losses.py


Example 18: _variance

  def _variance(self):
    var = (
        math_ops.square(self.rate) / math_ops.squared_difference(
            self.concentration, 1.) / (self.concentration - 2.))
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 2., var, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype),
              self.concentration,
              message="variance undefined when any concentration <= 2"),
      ], var)
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 17, Source: inverse_gamma.py
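The closed form implemented above is the inverse-gamma variance, rate^2 / ((concentration - 1)^2 * (concentration - 2)), which exists only for concentration > 2. A quick numeric check with hypothetical parameters:

concentration, rate = 3.0, 2.0  # concentration > 2, so the variance exists
var = rate ** 2 / (concentration - 1.0) ** 2 / (concentration - 2.0)
# 4 / (4 * 1) = 1.0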


Example 19: least_squares_generator_loss

def least_squares_generator_loss(
    discriminator_gen_outputs,
    real_label=1,
    weights=1.0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    add_summaries=False):
  """Least squares generator loss.

  This loss comes from `Least Squares Generative Adversarial Networks`
  (https://arxiv.org/abs/1611.04076).

  L = 1/2 * (D(G(z)) - `real_label`) ** 2

  where D(y) are discriminator logits.

  Args:
    discriminator_gen_outputs: Discriminator output on generated data. Expected
      to be in the range of (-inf, inf).
    real_label: The value that the generator is trying to get the discriminator
      to output on generated data.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `discriminator_gen_outputs`, and must be broadcastable to
      `discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or
      the same as the corresponding dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. The shape depends on `reduction`.
  """
  with ops.name_scope(scope, 'lsq_generator_loss',
                      (discriminator_gen_outputs, real_label)) as scope:
    discriminator_gen_outputs = math_ops.to_float(discriminator_gen_outputs)
    loss = math_ops.squared_difference(
        discriminator_gen_outputs, real_label) / 2.0
    loss = losses.compute_weighted_loss(
        loss, weights, scope, loss_collection, reduction)

  if add_summaries:
    summary.scalar('generator_lsq_loss', loss)

  return loss
Developer: JonathanRaiman, Project: tensorflow, Lines: 46, Source: losses_impl.py
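A NumPy sketch of the loss with hypothetical discriminator outputs (with the default unit weights, the reduction amounts to a mean):

import numpy as np

real_label = 1.0
d_on_generated = np.array([0.2, 0.7, 1.3])  # discriminator logits on G(z)
loss = np.mean((d_on_generated - real_label) ** 2 / 2.0)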


Example 20: _testGrad

  def _testGrad(self, left_shape, right_shape):

    if len(left_shape) > len(right_shape):
      output_shape = left_shape
    else:
      output_shape = right_shape
    l = np.random.randn(*left_shape)
    r = np.random.randn(*right_shape)

    with self.cached_session(use_gpu=True):
      left_tensor = constant_op.constant(l, shape=left_shape)
      right_tensor = constant_op.constant(r, shape=right_shape)
      output = math_ops.squared_difference(left_tensor, right_tensor)
      left_err = gradient_checker.compute_gradient_error(
          left_tensor, left_shape, output, output_shape, x_init_value=l)
      right_err = gradient_checker.compute_gradient_error(
          right_tensor, right_shape, output, output_shape, x_init_value=r)
    self.assertLess(left_err, 1e-10)
    self.assertLess(right_err, 1e-10)
Developer: adit-chandra, Project: tensorflow, Lines: 19, Source: math_grad_test.py
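The gradients being checked are analytic: d/dx (x - y)^2 = 2(x - y) and d/dy (x - y)^2 = -2(x - y). A hedged TF2 sketch using tf.GradientTape:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([0.5, 0.5, 0.5])

with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = tf.reduce_sum(tf.math.squared_difference(x, y))

dx, dy = tape.gradient(z, [x, y])
# dx == 2 * (x - y), dy == -2 * (x - y)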



Note: The tensorflow.python.ops.math_ops.squared_difference examples above were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.

