
Python math_ops.linspace Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.linspace. If you are trying to work out what linspace does, how to call it, or what real-world uses look like, the curated examples below should help.



The following presents 20 code examples of the linspace function, ordered by popularity.
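
Before the examples, a quick refresher on the function itself: math_ops.linspace (exposed publicly as tf.linspace, and modeled on np.linspace) returns num evenly spaced values from start to stop, with both endpoints included. A minimal sketch of the endpoint semantics, shown with NumPy since it follows the same convention:

import numpy as np

# Both endpoints are included, so the step is (stop - start) / (num - 1) = 2.5.
print(np.linspace(0.0, 10.0, num=5))   # [ 0.   2.5  5.   7.5 10. ]

The TensorFlow equivalent is tf.linspace(0.0, 10.0, 5); note that start and stop are floating-point tensors. All twenty examples below lean on this inclusive-endpoint behavior, whether building test weights, probability grids, or annealing schedules.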

Example 1: testConditionalMaskUpdate

 def testConditionalMaskUpdate(self):
   param_list = [
       "pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6"
   ]
   test_spec = ",".join(param_list)
   pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
   weights = variables.Variable(
       math_ops.linspace(1.0, 100.0, 100), name="weights")
   masked_weights = pruning.apply_mask(weights)
   sparsity = variables.Variable(0.00, name="sparsity")
   # Set up pruning
   p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
   p._spec.threshold_decay = 0.0
   mask_update_op = p.conditional_mask_update_op()
   sparsity_val = math_ops.linspace(0.0, 0.9, 10)
   increment_global_step = state_ops.assign_add(self.global_step, 1)
   non_zero_count = []
   with self.test_session() as session:
     variables.global_variables_initializer().run()
     for i in range(10):
       session.run(state_ops.assign(sparsity, sparsity_val[i]))
       session.run(mask_update_op)
       session.run(increment_global_step)
       non_zero_count.append(np.count_nonzero(masked_weights.eval()))
    # Weights are pruned at steps 0, 2, 4, and 6.
   expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
   self.assertAllEqual(expected_non_zero_count, non_zero_count)
Author: Kongsea, Project: tensorflow, Lines: 27, Source: pruning_test.py
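
The pruning tests in this file build their weights with math_ops.linspace(1.0, 100.0, 100), so the magnitude ranking of the weights is known exactly and the expected nonzero counts can be written down in advance. Below is a minimal NumPy sketch of the magnitude-pruning rule these tests exercise; it is a simplified stand-in, not the actual model_pruning implementation, whose threshold selection differs slightly in corner cases (e.g. Example 15 below keeps 51 rather than 50 weights at sparsity 0.5). Note also that the expected counts in this test repeat each value because pruning_frequency=2 only updates the mask every other step.

import numpy as np

def magnitude_prune(weights, sparsity):
  # Zero out the smallest-magnitude `sparsity` fraction of the weights.
  k = int(np.round(sparsity * weights.size))   # number of weights to drop
  if k == 0:
    return weights
  threshold = np.sort(np.abs(weights))[k - 1]
  return np.where(np.abs(weights) > threshold, weights, 0.0)

weights = np.linspace(1.0, 100.0, 100)         # magnitudes 1, 2, ..., 100
for sparsity in np.linspace(0.0, 0.9, 10):
  print(round(sparsity, 1), np.count_nonzero(magnitude_prune(weights, sparsity)))
  # 0.0 -> 100, 0.1 -> 90, 0.2 -> 80, ..., 0.9 -> 10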


Example 2: testWeightSpecificSparsity

  def testWeightSpecificSparsity(self):
    param_list = [
        "begin_pruning_step=1", "pruning_frequency=1", "end_pruning_step=100",
        "target_sparsity=0.5", "weight_sparsity_map=[layer2/weights:0.75]",
        "threshold_decay=0.0"
    ]
    test_spec = ",".join(param_list)
    pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)

    with variable_scope.variable_scope("layer1"):
      w1 = variables.Variable(
          math_ops.linspace(1.0, 100.0, 100), name="weights")
      _ = pruning.apply_mask(w1)
    with variable_scope.variable_scope("layer2"):
      w2 = variables.Variable(
          math_ops.linspace(1.0, 100.0, 100), name="weights")
      _ = pruning.apply_mask(w2)

    p = pruning.Pruning(pruning_hparams)
    mask_update_op = p.conditional_mask_update_op()
    increment_global_step = state_ops.assign_add(self.global_step, 1)

    with self.cached_session() as session:
      variables.global_variables_initializer().run()
      for _ in range(110):
        session.run(mask_update_op)
        session.run(increment_global_step)

      self.assertAllEqual(
          session.run(pruning.get_weight_sparsity()), [0.5, 0.75])
Author: AnishShah, Project: tensorflow, Lines: 30, Source: pruning_test.py


Example 3: testNanFromGradsDontPropagate

  def testNanFromGradsDontPropagate(self):
    """Test that update with NaN gradients does not cause NaN in results."""
    def _nan_log_prob_with_nan_gradient(x):
      return np.nan * math_ops.reduce_sum(x)

    with self.test_session() as sess:
      initial_x = math_ops.linspace(0.01, 5, 10)
      updated_x, kernel_results = hmc.kernel(
          target_log_prob_fn=_nan_log_prob_with_nan_gradient,
          current_state=initial_x,
          step_size=2.,
          num_leapfrog_steps=5,
          seed=47)
      initial_x_, updated_x_, acceptance_probs_ = sess.run(
          [initial_x, updated_x, kernel_results.acceptance_probs])

      logging_ops.vlog(1, "initial_x = {}".format(initial_x_))
      logging_ops.vlog(1, "updated_x = {}".format(updated_x_))
      logging_ops.vlog(1, "acceptance_probs = {}".format(acceptance_probs_))

      self.assertAllEqual(initial_x_, updated_x_)
      self.assertEqual(acceptance_probs_, 0.)

      self.assertAllFinite(
          gradients_ops.gradients(updated_x, initial_x)[0].eval())
      self.assertAllEqual([True], [g is None for g in gradients_ops.gradients(
          kernel_results.proposed_grads_target_log_prob, initial_x)])
      self.assertAllEqual([False], [g is None for g in gradients_ops.gradients(
          kernel_results.proposed_grads_target_log_prob,
          kernel_results.proposed_state)])
Author: ClowJ, Project: tensorflow, Lines: 30, Source: hmc_test.py


Example 4: testNanRejection

  def testNanRejection(self):
    """Tests that an update that yields NaN potentials gets rejected.

    We run HMC with a target distribution that returns NaN
    log-likelihoods if any element of x < 0, and unit-scale
    exponential log-likelihoods otherwise. The exponential potential
    pushes x towards 0, ensuring that any reasonably large update will
    push us over the edge into NaN territory.
    """
    def _unbounded_exponential_log_prob(x):
      """An exponential distribution with log-likelihood NaN for x < 0."""
      per_element_potentials = array_ops.where(
          x < 0.,
          array_ops.fill(array_ops.shape(x), x.dtype.as_numpy_dtype(np.nan)),
          -x)
      return math_ops.reduce_sum(per_element_potentials)

    with self.test_session() as sess:
      initial_x = math_ops.linspace(0.01, 5, 10)
      updated_x, kernel_results = hmc.kernel(
          target_log_prob_fn=_unbounded_exponential_log_prob,
          current_state=initial_x,
          step_size=2.,
          num_leapfrog_steps=5,
          seed=46)
      initial_x_, updated_x_, acceptance_probs_ = sess.run(
          [initial_x, updated_x, kernel_results.acceptance_probs])

      logging_ops.vlog(1, "initial_x = {}".format(initial_x_))
      logging_ops.vlog(1, "updated_x = {}".format(updated_x_))
      logging_ops.vlog(1, "acceptance_probs = {}".format(acceptance_probs_))

      self.assertAllEqual(initial_x_, updated_x_)
      self.assertEqual(acceptance_probs_, 0.)
Author: ClowJ, Project: tensorflow, Lines: 34, Source: hmc_test.py
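
Examples 3 and 4 (and their older-API versions, Examples 6 and 7 below) exercise the same safety property: once NaN enters the proposed state's log-density or gradients, the Metropolis accept step can never fire, so the chain keeps its current state; the kernel additionally reports the acceptance probability as 0., which is what the assertions check. A minimal sketch of why NaN amounts to rejection (an illustration of the Metropolis step, not the actual hmc internals; the numeric values are hypothetical):

import numpy as np

current_log_prob = -1.3                  # hypothetical value for illustration
proposed_log_prob = np.nan               # proposal landed in the NaN region
log_accept_ratio = proposed_log_prob - current_log_prob    # nan
accept_prob = np.exp(np.minimum(0.0, log_accept_ratio))    # nan

# Any comparison involving NaN evaluates to False, so a NaN acceptance
# probability can never beat the uniform draw: the proposal is rejected.
print(accept_prob > np.random.uniform())                   # False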


Example 5: _LinSpace

 def _LinSpace(self, start, stop, num):
   # NOTE(touts): Needs to pass a graph to get a new session each time.
   with ops.Graph().as_default() as graph:
     with self.test_session(graph=graph, force_gpu=self.force_gpu):
       tf_ans = math_ops.linspace(start, stop, num, name="linspace")
       self.assertEqual([num], tf_ans.get_shape())
       return tf_ans.eval()
Author: HughKu, Project: tensorflow, Lines: 7, Source: init_ops_test.py


Example 6: testNanFromGradsDontPropagate

  def testNanFromGradsDontPropagate(self):
    """Test that update with NaN gradients does not cause NaN in results."""
    def _nan_log_prob_with_nan_gradient(x):
      return np.nan * math_ops.reduce_sum(x)

    with self.test_session() as sess:
      initial_x = math_ops.linspace(0.01, 5, 10)
      updated_x, acceptance_probs, new_log_prob, new_grad = hmc.kernel(
          2., 5, initial_x, _nan_log_prob_with_nan_gradient, [0])
      initial_x_val, updated_x_val, acceptance_probs_val = sess.run(
          [initial_x, updated_x, acceptance_probs])

      logging.vlog(1, 'initial_x = {}'.format(initial_x_val))
      logging.vlog(1, 'updated_x = {}'.format(updated_x_val))
      logging.vlog(1, 'acceptance_probs = {}'.format(acceptance_probs_val))

      self.assertAllEqual(initial_x_val, updated_x_val)
      self.assertEqual(acceptance_probs_val, 0.)

      self.assertAllFinite(
          gradients_impl.gradients(updated_x, initial_x)[0].eval())
      self.assertTrue(
          gradients_impl.gradients(new_grad, initial_x)[0] is None)

      # Gradients of the acceptance probs and new log prob are not finite.
      _ = new_log_prob  # Prevent unused arg error.
Author: AbhinavJain13, Project: tensorflow, Lines: 26, Source: hmc_test.py


Example 7: testNanRejection

  def testNanRejection(self):
    """Tests that an update that yields NaN potentials gets rejected.

    We run HMC with a target distribution that returns NaN
    log-likelihoods if any element of x < 0, and unit-scale
    exponential log-likelihoods otherwise. The exponential potential
    pushes x towards 0, ensuring that any reasonably large update will
    push us over the edge into NaN territory.
    """
    def _unbounded_exponential_log_prob(x):
      """An exponential distribution with log-likelihood NaN for x < 0."""
      per_element_potentials = array_ops.where(x < 0,
                                               np.nan * array_ops.ones_like(x),
                                               -x)
      return math_ops.reduce_sum(per_element_potentials)

    with self.test_session() as sess:
      initial_x = math_ops.linspace(0.01, 5, 10)
      updated_x, acceptance_probs, _, _ = hmc.kernel(
          2., 5, initial_x, _unbounded_exponential_log_prob, [0])
      initial_x_val, updated_x_val, acceptance_probs_val = sess.run(
          [initial_x, updated_x, acceptance_probs])

      logging.vlog(1, 'initial_x = {}'.format(initial_x_val))
      logging.vlog(1, 'updated_x = {}'.format(updated_x_val))
      logging.vlog(1, 'acceptance_probs = {}'.format(acceptance_probs_val))

      self.assertAllEqual(initial_x_val, updated_x_val)
      self.assertEqual(acceptance_probs_val, 0.)
Author: AbhinavJain13, Project: tensorflow, Lines: 29, Source: hmc_test.py


Example 8: setUp

  def setUp(self):
    ops.reset_default_graph()
    dim = 1
    num = 3
    with ops.name_scope('some_scope'):
      # Basically from 0 to dim*num-1.
      flat_data = math_ops.linspace(0.0, dim * num - 1, dim * num)
      bias = variables.Variable(
          array_ops.reshape(flat_data, (num, dim)), name='bias')
    save = saver.Saver([bias])
    with self.test_session() as sess:
      variables.global_variables_initializer().run()
      self.bundle_file = os.path.join(test.get_temp_dir(), 'bias_checkpoint')
      save.save(sess, self.bundle_file)

    self.new_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword_new.txt')
    self.old_class_vocab_file = os.path.join(
        test.test_src_dir_path(_TESTDATA_PATH), 'keyword.txt')
    self.init_val = 42

    def _init_val_initializer(shape, dtype=None, partition_info=None):
      del dtype, partition_info  # Unused by this unit-testing initializer.
      return array_ops.tile(
          constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)

    self.initializer = _init_val_initializer
Author: 1000sprites, Project: tensorflow, Lines: 27, Source: checkpoint_ops_test.py


Example 9: make_variable

 def make_variable(self):
   n = 256
   shape = (n, n, n)
   items = n**3
   var = variables.Variable(
       array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
       dtype=dtypes.float32)
   return var
Author: AlbertXiebnu, Project: tensorflow, Lines: 8, Source: array_ops_test.py


Example 10: _multi_gamma_sequence

 def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
   """Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
   with self._name_scope(name, values=[a, p]):
     # Linspace only takes scalars, so we'll add in the offset afterwards.
     seq = math_ops.linspace(
         constant_op.constant(0.0, dtype=self.dtype),
         0.5 - 0.5 * p,
         math_ops.cast(p, dtypes.int32))
     return seq + array_ops.expand_dims(a, [-1])
Author: kdavis-mozilla, Project: tensorflow, Lines: 8, Source: wishart.py
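
The linspace call above generates the offsets 0, -1/2, ..., (1 - p)/2 that appear in the multivariate (log-)gamma function, log Gamma_p(a) = p(p-1)/4 * log(pi) + sum_{j=1}^{p} log Gamma(a + (1 - j)/2), and the expand_dims broadcasts them against a batch of a values. A quick NumPy/SciPy check of that identity (assuming scipy is available; a and p are hypothetical values):

import numpy as np
from scipy.special import gammaln, multigammaln

a, p = 4.2, 3
seq = np.linspace(0.0, 0.5 - 0.5 * p, p)     # offsets [0., -0.5, -1.]
lmvg = 0.25 * p * (p - 1) * np.log(np.pi) + np.sum(gammaln(a + seq))
print(np.isclose(lmvg, multigammaln(a, p)))  # True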


Example 11: test_finds_max_of_long_array

 def test_finds_max_of_long_array(self):
   # With d = 3e7, d - 1 == d in float32, so this test only passes if we use
   # double for the percentile indices. If float32 were used, it would fail
   # with an InvalidArgumentError about an out-of-bounds index.
   x = math_ops.linspace(0., 3e7, num=int(3e7))
   with self.cached_session():
     minval = sample_stats.percentile(x, q=0, validate_args=True)
     self.assertAllEqual(0, minval.eval())
Author: Ajaycs99, Project: tensorflow, Lines: 9, Source: sample_stats_test.py
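
The comment above is worth making concrete: float32 carries a 24-bit significand, so beyond 2^24 ≈ 1.7e7 its spacing between adjacent values exceeds 1, and at d = 3e7 subtracting 1 is a no-op. Hence the percentile indices must be computed in double:

import numpy as np

d32 = np.float32(3e7)
print(d32 - np.float32(1.0) == d32)               # True: 1 is below float32 resolution here
print(np.float64(3e7) - 1.0 == np.float64(3e7))   # False: float64 still resolves it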


Example 12: input_fn

 def input_fn():
   start = random_ops.random_uniform(
       (), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
   sin_curves = math_ops.sin(
       math_ops.linspace(start, (sequence_length - 1) * increment,
                         sequence_length + 1))
   inputs = array_ops.slice(sin_curves, [0], [sequence_length])
   labels = array_ops.slice(sin_curves, [1], [sequence_length])
   return {'inputs': inputs}, labels
Author: finardi, Project: tensorflow, Lines: 9, Source: state_saving_rnn_estimator_test.py
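
This example (and Example 13 below, which only adds an input key) uses linspace to sample sequence_length + 1 points of a sine wave with a random phase, then takes two length-sequence_length slices offset by one step, so each label is its input one step ahead. The same setup in plain NumPy (a sketch of the idea with hypothetical values for sequence_length and increment, not the estimator test itself):

import numpy as np

sequence_length, increment = 8, 0.32
start = np.random.uniform(0.0, 2.0 * np.pi)
# sequence_length + 1 samples, so the two slices below are offset by exactly
# one position: labels[i] is the value one step ahead of inputs[i].
sin_curves = np.sin(np.linspace(start, (sequence_length - 1) * increment,
                                sequence_length + 1))
inputs, labels = sin_curves[:-1], sin_curves[1:]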


Example 13: input_fn

 def input_fn():
   start = random_ops.random_uniform(
       (), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
   sin_curves = math_ops.sin(
       math_ops.linspace(start, (sequence_length - 1) * increment,
                         sequence_length + 1))
   inputs = array_ops.slice(sin_curves, [0], [sequence_length])
   labels = array_ops.slice(sin_curves, [1], [sequence_length])
   input_key = string_ops.string_join([
       'key_',
       string_ops.as_string(math_ops.cast(10000 * start, dtypes.int32))
   ])
   return {'inputs': inputs, input_key_column_name: input_key}, labels
Author: Jackhuang945, Project: tensorflow, Lines: 13, Source: state_saving_rnn_estimator_test.py


Example 14: _compute_quantiles

 def _compute_quantiles():
   """Helper to build quantiles."""
   # Omit {0, 1} since they might lead to Inf/NaN.
   zero = array_ops.zeros([], dtype=dist.dtype)
   edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
    # Expand edges so it broadcasts across batch dims.
   edges = array_ops.reshape(edges, shape=array_ops.concat([
       [-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
   quantiles = dist.quantile(edges)
   # Cyclically permute left by one.
   perm = array_ops.concat([
       math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
   quantiles = array_ops.transpose(quantiles, perm)
   return quantiles
Author: Jackiefan, Project: tensorflow, Lines: 14, Source: poisson_lognormal.py
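
The helper above uses linspace to lay down a uniform probability grid, then drops the first and last edges because quantile(0) and quantile(1) are typically -inf/+inf. A small NumPy/SciPy sketch of the same grid construction against a standard normal (assuming scipy is available; quadrature_size is a hypothetical value):

import numpy as np
from scipy.stats import norm

quadrature_size = 5
# quadrature_size + 3 edges on [0, 1]; slicing off both endpoints leaves
# quadrature_size + 1 interior probabilities, where the quantile fn is finite.
edges = np.linspace(0.0, 1.0, quadrature_size + 3)[1:-1]
print(norm.ppf(edges))   # finite quantiles; ppf(0.) / ppf(1.) would be -inf / +inf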


Example 15: testUpdateSingleMask

 def testUpdateSingleMask(self):
   with self.test_session() as session:
     weights = variables.Variable(
         math_ops.linspace(1.0, 100.0, 100), name="weights")
     masked_weights = pruning.apply_mask(weights)
     sparsity = variables.Variable(0.5, name="sparsity")
     p = pruning.Pruning(sparsity=sparsity)
     p._spec.threshold_decay = 0.0
     mask_update_op = p.mask_update_op()
     variables.global_variables_initializer().run()
     masked_weights_val = masked_weights.eval()
     self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
     session.run(mask_update_op)
     masked_weights_val = masked_weights.eval()
     self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
Author: Kongsea, Project: tensorflow, Lines: 15, Source: pruning_test.py


Example 16: testPartitionedVariableMasking

 def testPartitionedVariableMasking(self):
   partitioner = partitioned_variables.variable_axis_size_partitioner(40)
   with self.test_session() as session:
     with variable_scope.variable_scope("", partitioner=partitioner):
       sparsity = variables.Variable(0.5, name="Sparsity")
       weights = variable_scope.get_variable(
           "weights", initializer=math_ops.linspace(1.0, 100.0, 100))
       masked_weights = pruning.apply_mask(
           weights, scope=variable_scope.get_variable_scope())
     p = pruning.Pruning(sparsity=sparsity)
     p._spec.threshold_decay = 0.0
     mask_update_op = p.mask_update_op()
     variables.global_variables_initializer().run()
     masked_weights_val = masked_weights.eval()
     session.run(mask_update_op)
     masked_weights_val = masked_weights.eval()
     self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
Author: Kongsea, Project: tensorflow, Lines: 17, Source: pruning_test.py


Example 17: test_gradients

  def test_gradients(self):
    """Test that spectral_ops.stft has a working gradient."""
    with spectral_ops_test_util.fft_kernel_label_map(), (
        self.test_session(use_gpu=True)) as sess:
      signal_length = 512

      # An all-zero signal has all zero gradients with respect to the sum of the
      # magnitude STFT.
      empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
      empty_signal_gradient = sess.run(
          self._compute_stft_gradient(empty_signal))
      self.assertTrue((empty_signal_gradient == 0.0).all())

      # A sinusoid will have non-zero components of its gradient with respect to
      # the sum of the magnitude STFT.
      sinusoid = math_ops.sin(
          2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
      sinusoid_gradient = sess.run(self._compute_stft_gradient(sinusoid))
      self.assertFalse((sinusoid_gradient == 0.0).all())
Author: 1000sprites, Project: tensorflow, Lines: 19, Source: spectral_ops_test.py


Example 18: _forward_log_det_jacobian

  def _forward_log_det_jacobian(self, x):
    # Let Y be a symmetric, positive definite matrix and write:
    #   Y = X X.T
    # where X is lower-triangular.
    #
    # Observe that,
    #   dY[i,j]/dX[a,b]
    #   = d/dX[a,b] { X[i,:] X[j,:] }
    #   = sum_{d=1}^p { I[i=a] I[d=b] X[j,d] + I[j=a] I[d=b] X[i,d] }
    #
    # To compute the Jacobian dX/dY we must represent X,Y as vectors. Since Y is
    # symmetric and X is lower-triangular, we need vectors of dimension:
    #   d = p (p + 1) / 2
    # where X, Y are p x p matrices, p > 0. We use a row-major mapping, i.e.,
    #   k = { i (i + 1) / 2 + j   i>=j
    #       { undef               i<j
    # and assume zero-based indexes. When k is undef, the element is dropped.
    # Example:
    #           j      k
    #        0 1 2 3  /
    #    0 [ 0 . . . ]
    # i  1 [ 1 2 . . ]
    #    2 [ 3 4 5 . ]
    #    3 [ 6 7 8 9 ]
    # Write vec[.] to indicate transforming a matrix to vector via k(i,j). (With
    # slight abuse: k(i,j)=undef means the element is dropped.)
    #
    # We now show d vec[Y] / d vec[X] is lower triangular. Assuming both are
    # defined, observe that k(i,j) < k(a,b) iff (1) i<a or (2) i=a and j<b.
    # In both cases dvec[Y]/dvec[X]@[k(i,j),k(a,b)] = 0 since:
    # (1) j<=i<a thus i,j!=a.
    # (2) i=a>j  thus i,j!=a.
    #
    # Since the Jacobian is lower-triangular, we need only compute the product
    # of diagonal elements:
    #   d vec[Y] / d vec[X] @[k(i,j), k(i,j)]
    #   = X[j,j] + I[i=j] X[i,j]
    #   = 2 X[j,j].
    # Since there is a 2 X[j,j] term for every lower-triangular element of X we
    # conclude:
    #   |Jac(d vec[Y]/d vec[X])| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
    if self._static_event_ndims == 0:
      if self.validate_args:
        is_positive = check_ops.assert_positive(
            x, message="All elements must be positive.")
        x = control_flow_ops.with_dependencies([is_positive], x)
      return np.log(2.) + math_ops.log(x)

    diag = array_ops.matrix_diag_part(x)

    # We now ensure diag is columnar. Eg, if `diag = [1, 2, 3]` then the output
    # is `[[1], [2], [3]]` and if `diag = [[1, 2, 3], [4, 5, 6]]` then the
    # output is unchanged.
    diag = self._make_columnar(diag)

    if self.validate_args:
      is_matrix = check_ops.assert_rank_at_least(
          x, 2, message="Input must be a (batch of) matrix.")
      shape = array_ops.shape(x)
      is_square = check_ops.assert_equal(
          shape[-2], shape[-1],
          message="Input must be a (batch of) square matrix.")
      # Assuming lower-triangular means we only need check diag>0.
      is_positive_definite = check_ops.assert_positive(
          diag, message="Input must be positive definite.")
      x = control_flow_ops.with_dependencies(
          [is_matrix, is_square, is_positive_definite], x)

    # Create a vector equal to: [p, p-1, ..., 2, 1].
    if x.get_shape().ndims is None or x.get_shape()[-1].value is None:
      p_int = array_ops.shape(x)[-1]
      p_float = math_ops.cast(p_int, dtype=x.dtype)
    else:
      p_int = x.get_shape()[-1].value
      p_float = np.array(p_int, dtype=x.dtype.as_numpy_dtype)
    exponents = math_ops.linspace(p_float, 1., p_int)

    sum_weighted_log_diag = array_ops.squeeze(
        math_ops.matmul(math_ops.log(diag),
                        exponents[..., array_ops.newaxis]),
        squeeze_dims=-1)
    fldj = p_float * np.log(2.) + sum_weighted_log_diag

    return fldj
Author: Immexxx, Project: tensorflow, Lines: 84, Source: cholesky_outer_product_impl.py
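
The derivation in the comments concludes that for Y = X X.T with X lower-triangular, log|det dvec[Y]/dvec[X]| = p log 2 + sum_{j=0}^{p-1} (p - j) log X[j,j], which the code evaluates with a linspace of exponents [p, p-1, ..., 1]. A finite-difference check of that formula in NumPy (an illustrative verification under the row-major vectorization described above, not part of the bijector):

import numpy as np

p = 3
tril = np.tril_indices(p)

def forward(vec):
  # Map the d = p(p+1)/2 free parameters of lower-triangular X to those of
  # symmetric Y = X X.T, both in row-major lower-triangular order.
  X = np.zeros((p, p))
  X[tril] = vec
  return (X @ X.T)[tril]

rng = np.random.default_rng(0)
x = rng.uniform(0.5, 2.0, size=p * (p + 1) // 2)
X = np.zeros((p, p))
X[tril] = x

# Analytic log-det from the derivation: p*log(2) + sum_j (p - j)*log X[j, j],
# i.e. exponents [p, p-1, ..., 1] exactly as the linspace in the code above.
exponents = np.linspace(float(p), 1.0, p)
fldj = p * np.log(2.0) + np.sum(exponents * np.log(np.diag(X)))

# Numeric Jacobian by central differences.
d = x.size
J = np.zeros((d, d))
eps = 1e-6
for k in range(d):
  e = np.zeros(d)
  e[k] = eps
  J[:, k] = (forward(x + e) - forward(x - e)) / (2.0 * eps)

print(np.isclose(fldj, np.log(abs(np.linalg.det(J)))))   # True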


Example 19: ais_chain


#......... part of the code is omitted here .........
  Args:
    n_iterations: Integer number of Markov chain updates to run. More
      iterations means more expense, but smoother annealing between q
      and p, which in turn means exponentially lower variance for the
      normalizing constant estimator.
    step_size: Scalar step size or array of step sizes for the
      leapfrog integrator. Broadcasts to the shape of
      `initial_x`. Larger step sizes lead to faster progress, but
      too-large step sizes make rejection exponentially more likely.
      When possible, it's often helpful to match per-variable step
      sizes to the standard deviations of the target distribution in
      each variable.
    n_leapfrog_steps: Integer number of steps to run the leapfrog
      integrator for. Total progress per HMC step is roughly
      proportional to step_size * n_leapfrog_steps.
    initial_x: Tensor of initial state(s) of the Markov chain(s). Must
      be a sample from q, or results will be incorrect.
    target_log_prob_fn: Python callable which takes an argument like `initial_x`
      and returns its (possibly unnormalized) log-density under the target
      distribution.
    proposal_log_prob_fn: Python callable that returns the log density of the
      initial distribution.
    event_dims: List of dimensions that should not be treated as
      independent. This allows for multiple chains to be run independently
      in parallel. Default is (), i.e., all dimensions are independent.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    ais_weights: Tensor with the estimated weight(s). Has shape matching
      `target_log_prob_fn(initial_x)`.
    chain_states: Tensor with the state(s) of the Markov chain(s) at the final
      iteration. Has shape matching `initial_x`.
    acceptance_probs: Tensor with the acceptance probabilities for the final
      iteration. Has shape matching `target_log_prob_fn(initial_x)`.

  #### Examples:

  ```python
  # Estimating the normalizing constant of a log-gamma distribution:
  def proposal_log_prob(x):
    # Standard normal log-probability. This is properly normalized.
    return tf.reduce_sum(-0.5 * tf.square(x) - 0.5 * np.log(2 * np.pi), 1)
  def target_log_prob(x):
    # Unnormalized log-gamma(2, 3) distribution.
    # True normalizer is (lgamma(2) - 2 * log(3)) * x.shape[1]
    return tf.reduce_sum(2. * x - 3. * tf.exp(x), 1)
  # Run 100 AIS chains in parallel
  initial_x = tf.random_normal([100, 20])
  w, _, _ = hmc.ais_chain(1000, 0.2, 2, initial_x, target_log_prob,
                          proposal_log_prob, event_dims=[1])
  log_normalizer_estimate = tf.reduce_logsumexp(w) - np.log(100)
  ```

  ```python
  # Estimating the marginal likelihood of a Bayesian regression model:
  base_measure = -0.5 * np.log(2 * np.pi)
  def proposal_log_prob(x):
    # Standard normal log-probability. This is properly normalized.
    return tf.reduce_sum(-0.5 * tf.square(x) + base_measure, 1)
  def regression_log_joint(beta, x, y):
    # This function returns a vector whose ith element is log p(beta[i], y | x).
    # Each row of beta corresponds to the state of an independent Markov chain.
    log_prior = tf.reduce_sum(-0.5 * tf.square(beta) + base_measure, 1)
    means = tf.matmul(beta, x, transpose_b=True)
    log_likelihood = tf.reduce_sum(-0.5 * tf.square(y - means) +
                                   base_measure, 1)
    return log_prior + log_likelihood
  def log_joint_partial(beta):
    return regression_log_joint(beta, x, y)
  # Run 100 AIS chains in parallel
  initial_beta = tf.random_normal([100, x.shape[1]])
  w, beta_samples, _ = hmc.ais_chain(1000, 0.1, 2, initial_beta,
                                     log_joint_partial, proposal_log_prob,
                                     event_dims=[1])
  log_normalizer_estimate = tf.reduce_logsumexp(w) - np.log(100)
  ```
  """
  with ops.name_scope(name, 'hmc_ais_chain',
                      [n_iterations, step_size, n_leapfrog_steps, initial_x]):
    non_event_shape = array_ops.shape(target_log_prob_fn(initial_x))

    beta_series = math_ops.linspace(0., 1., n_iterations+1)[1:]
    def _body(a, beta):  # pylint: disable=missing-docstring
      def log_prob_beta(x):
        return ((1 - beta) * proposal_log_prob_fn(x) +
                beta * target_log_prob_fn(x))
      last_x = a[0]
      w = a[2]
      w += (1. / n_iterations) * (target_log_prob_fn(last_x) -
                                  proposal_log_prob_fn(last_x))
      # TODO(b/66917083): There's an opportunity for gradient reuse here.
      updated_x, acceptance_probs, _, _ = kernel(step_size, n_leapfrog_steps,
                                                 last_x, log_prob_beta,
                                                 event_dims)
      return updated_x, acceptance_probs, w

    x, acceptance_probs, w = functional_ops.scan(
        _body, beta_series, (initial_x, array_ops.zeros(non_event_shape),
                             array_ops.zeros(non_event_shape)))
  return w[-1], x[-1], acceptance_probs[-1]
Author: Crazyonxh, Project: tensorflow, Lines: 101, Source: hmc_impl.py


Example 20: _sin_fn

 def _sin_fn(x):
   ranger = math_ops.linspace(
       array_ops.reshape(x[0], []), (sequence_length - 1) * increment,
       sequence_length + 1)
   return math_ops.sin(ranger)
Author: AliMiraftab, Project: tensorflow, Lines: 5, Source: dynamic_rnn_estimator_test.py



Note: The tensorflow.python.ops.math_ops.linspace examples in this article were compiled from open-source code hosted on GitHub, MSDocs, and similar platforms; the snippets were selected from community-contributed projects. Copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.

