
Python tensorflow.reduce_logsumexp Function Code Examples


This article collects typical usage examples of the tensorflow.reduce_logsumexp function in Python. If you have been wondering what reduce_logsumexp does, how to use it, or what real-world calls look like, the curated code examples below should help.



The following presents 20 code examples of the reduce_logsumexp function, sorted by popularity by default.
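Before the examples, a minimal sketch of what the function buys you (TF 1.x style, matching the snippets below; the tensor values are purely illustrative): reduce_logsumexp computes log(sum(exp(x))) by shifting by the maximum internally, so large logits do not overflow the way a naive formulation does.

import tensorflow as tf

x = tf.constant([1000.0, 1000.0])
naive = tf.log(tf.reduce_sum(tf.exp(x)))  # exp(1000) overflows to inf
stable = tf.reduce_logsumexp(x)           # 1000 + log(2), computed stably
with tf.Session() as sess:
    print(sess.run([naive, stable]))      # [inf, 1000.6931]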

Example 1: while_step

 def while_step(t, rnn_state, tas, accs):
   """Implements one timestep of IWAE computation."""
   log_weights_acc, kl_acc = accs
   cur_inputs, cur_mask = nested.read_tas([inputs_ta, mask_ta], t)
   # Run the cell for one step.
   log_q_z, log_p_z, log_p_x_given_z, kl, new_state = cell(
       cur_inputs,
       rnn_state,
       cur_mask,
   )
   # Compute the incremental weight and use it to update the current
   # accumulated weight.
   kl_acc += kl * cur_mask
   log_alpha = (log_p_x_given_z + log_p_z - log_q_z) * cur_mask
   log_alpha = tf.reshape(log_alpha, [num_samples, batch_size])
   log_weights_acc += log_alpha
   # Calculate the effective sample size.
   ess_num = 2 * tf.reduce_logsumexp(log_weights_acc, axis=0)
   ess_denom = tf.reduce_logsumexp(2 * log_weights_acc, axis=0)
   log_ess = ess_num - ess_denom
   # Update the TensorArrays and accumulators.
   ta_updates = [log_weights_acc, log_ess]
   new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]
   new_accs = (log_weights_acc, kl_acc)
   return t + 1, new_state, new_tas, new_accs
Developer: ALISCIFP, Project: models, Lines: 25, Source: bounds.py


Example 2: ess_criterion

def ess_criterion(log_weights, unused_t):
  """A criterion that resamples based on effective sample size."""
  num_particles = tf.shape(log_weights)[0]
  # Calculate the effective sample size.
  ess_num = 2 * tf.reduce_logsumexp(log_weights, axis=0)
  ess_denom = tf.reduce_logsumexp(2 * log_weights, axis=0)
  log_ess = ess_num - ess_denom
  return log_ess <= tf.log(tf.to_float(num_particles) / 2.0)
Developer: 812864539, Project: models, Lines: 8, Source: smc.py
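A note on the two logsumexp terms: they implement the standard effective-sample-size formula in log space. Writing w_i = exp(log_weights_i),

    \mathrm{ESS} = \frac{\left(\sum_i w_i\right)^2}{\sum_i w_i^2}
    \quad\Longrightarrow\quad
    \log \mathrm{ESS} = 2\,\mathrm{LSE}(\log w) - \mathrm{LSE}(2\log w)

and the criterion signals a resample when ESS <= num_particles / 2, i.e. when log_ess <= log(num_particles / 2).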


Example 3: __init__

    def __init__(self, env_spec, expert_trajs=None,
                 discrim_arch=relu_net,
                 discrim_arch_args={},
                 score_using_discrim=False,
                 l2_reg=0,
                 name='gcl'):
        super(AIRLDiscrete, self).__init__()
        self.dO = env_spec.observation_space.flat_dim
        self.dU = env_spec.action_space.flat_dim
        self.score_using_discrim = score_using_discrim
        if expert_trajs:
            self.expert_trajs = expert_trajs
            self.expert_trajs_extracted = self.extract_paths(expert_trajs)

        # build energy model
        with tf.variable_scope(name) as _vs:
            # Should be batch_size x T x dO/dU
            self.obs_t = tf.placeholder(tf.float32, [None, self.dO], name='obs')
            self.act_t = tf.placeholder(tf.float32, [None, self.dU], name='act')
            self.labels = tf.placeholder(tf.float32, [None, 1], name='labels')
            self.lprobs = tf.placeholder(tf.float32, [None, 1], name='log_probs')
            self.lr = tf.placeholder(tf.float32, (), name='lr')

            obs_act = tf.concat([self.obs_t, self.act_t], axis=1)
            with tf.variable_scope('discrim') as dvs:
                with tf.variable_scope('energy'):
                    energy = discrim_arch(obs_act, dout=self.dU, **discrim_arch_args)

                self.value_fn = tf.reduce_logsumexp(-energy, axis=1, keep_dims=True)
                self.energy = tf.reduce_sum(energy*self.act_t, axis=1, keep_dims=True)  # select action

                log_p_tau = -self.energy - self.value_fn  
                discrim_vars = tf.get_collection('reg_vars', scope=dvs.name)


            log_q_tau = self.lprobs

            if l2_reg > 0:
                reg_loss = l2_reg*tf.reduce_sum([tf.reduce_sum(tf.square(var)) for var in discrim_vars])
            else:
                reg_loss = 0

            log_pq = tf.reduce_logsumexp([log_p_tau, log_q_tau], axis=0)
            self.d_tau = tf.exp(log_p_tau-log_pq)
            cent_loss = -tf.reduce_mean(self.labels*(log_p_tau-log_pq) + (1-self.labels)*(log_q_tau-log_pq))

            self.loss = cent_loss + reg_loss
            self.step = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
            self._make_param_ops(_vs)
Developer: saadmahboob, Project: inverse_rl, Lines: 49, Source: imitation_learning.py


Example 4: testCrfLogNorm

  def testCrfLogNorm(self):
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    sequence_lengths = np.array(3, dtype=np.int32)
    with self.test_session() as sess:
      all_sequence_scores = []

      # Compare the dynamic program with brute force computation.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        all_sequence_scores.append(
            tf.contrib.crf.crf_sequence_score(
                inputs=tf.expand_dims(inputs, 0),
                tag_indices=tf.expand_dims(tag_indices, 0),
                sequence_lengths=tf.expand_dims(sequence_lengths, 0),
                transition_params=tf.constant(transition_params)))

      brute_force_log_norm = tf.reduce_logsumexp(all_sequence_scores)
      log_norm = tf.contrib.crf.crf_log_norm(
          inputs=tf.expand_dims(inputs, 0),
          sequence_lengths=tf.expand_dims(sequence_lengths, 0),
          transition_params=tf.constant(transition_params))
      log_norm = tf.squeeze(log_norm, [0])
      tf_brute_force_log_norm, tf_log_norm = sess.run(
          [brute_force_log_norm, log_norm])

      self.assertAllClose(tf_log_norm, tf_brute_force_log_norm)
Developer: 821760408-sp, Project: tensorflow, Lines: 33, Source: crf_test.py


Example 5: _log_prob

 def _log_prob(self, x):
   with tf.control_dependencies(self._runtime_assertions):
     x = self._pad_sample_dims(x)
     log_prob_x = self.components_distribution.log_prob(x)  # [S, B, k]
     log_mix_prob = tf.nn.log_softmax(
         self.mixture_distribution.logits, axis=-1)  # [B, k]
     return tf.reduce_logsumexp(log_prob_x + log_mix_prob, axis=-1)  # [S, B]
Developer: lewisKit, Project: probability, Lines: 7, Source: mixture_same_family.py
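For reference, this is the usual mixture marginal evaluated in log space: with mixture weights \pi_k (the log-softmaxed logits) and component densities p_k,

    \log p(x) = \mathrm{logsumexp}_k\big(\log p_k(x) + \log \pi_k\big) = \log \sum_k \pi_k\, p_k(x)

which is exactly what the final reduce_logsumexp over the component axis computes.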


Example 6: _log_variance

 def _log_variance(self):
   # Following calculation is based on law of total variance:
   #
   # Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
   #
   # where,
   #
   # Z|v ~ interpolate_affine[v](distribution)
   # V ~ mixture_distribution
   #
   # thus,
   #
   # E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
   # Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
   v = tf.stack(
       [
           # log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
           self.distribution.log_rate,
           # log((Mean[d] - Mean)**2)
           2. * tf.log(
               tf.abs(self.distribution.mean() -
                      self._mean()[..., tf.newaxis])),
       ],
       axis=-1)
   return tf.reduce_logsumexp(
       self.mixture_distribution.logits[..., tf.newaxis] + v, axis=[-2, -1])
Developer: asudomoeva, Project: probability, Lines: 26, Source: poisson_lognormal.py


Example 7: testCrfLogLikelihood

  def testCrfLogLikelihood(self):
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    with self.test_session() as sess:
      all_sequence_log_likelihoods = []

      # Make sure all probabilities sum to 1.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        sequence_log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            inputs=tf.expand_dims(inputs, 0),
            tag_indices=tf.expand_dims(tag_indices, 0),
            sequence_lengths=tf.expand_dims(sequence_lengths, 0),
            transition_params=tf.constant(transition_params))
        all_sequence_log_likelihoods.append(sequence_log_likelihood)
      total_log_likelihood = tf.reduce_logsumexp(all_sequence_log_likelihoods)
      tf_total_log_likelihood = sess.run(total_log_likelihood)
      self.assertAllClose(tf_total_log_likelihood, 0.0)
Developer: 821760408-sp, Project: tensorflow, Lines: 25, Source: crf_test.py


Example 8: log_alpha_likelihood_ratio

  def log_alpha_likelihood_ratio(self, activation_fn=tf.nn.relu):

    # each nn sample returns (log f, log likelihoods)
    nn_samples = [
        self.sample_neural_network(activation_fn)
        for _ in range(self.num_mc_nn_samples)
    ]
    nn_log_f_samples = [elt[0] for elt in nn_samples]
    nn_log_lk_samples = [elt[1] for elt in nn_samples]

    # we stack the (log f, log likelihoods) from the k nn samples
    nn_log_f_stack = tf.stack(nn_log_f_samples)      # k x 1
    nn_log_lk_stack = tf.stack(nn_log_lk_samples)    # k x N
    nn_f_tile = tf.tile(nn_log_f_stack, [self.batch_size])
    nn_f_tile = tf.reshape(nn_f_tile,
                           [self.num_mc_nn_samples, self.batch_size])

    # now both the log f and log likelihood terms have shape: k x N
    # apply formula in https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/
    nn_log_ratio = nn_log_lk_stack - nn_f_tile
    nn_log_ratio = self.alpha * tf.transpose(nn_log_ratio)
    logsumexp_value = tf.reduce_logsumexp(nn_log_ratio, -1)
    log_k_scalar = tf.log(tf.cast(self.num_mc_nn_samples, tf.float32))
    log_k = log_k_scalar * tf.ones([self.batch_size])

    return tf.reduce_sum(logsumexp_value - log_k, -1)
Developer: 812864539, Project: models, Lines: 26, Source: bb_alpha_divergence_model.py


Example 9: _assert_valid_sample

 def _assert_valid_sample(self, x):
   if not self.validate_args:
     return x
   return control_flow_ops.with_dependencies([
       tf.assert_non_positive(x),
       tf.assert_near(
           tf.zeros([], dtype=self.dtype), tf.reduce_logsumexp(x, axis=[-1])),
   ], x)
Developer: asudomoeva, Project: probability, Lines: 8, Source: relaxed_onehot_categorical.py


Example 10: log_prob

 def log_prob(self, x):
   n1 = tf.contrib.distributions.Normal(self.mu, self.sigma1)
   n2 = tf.contrib.distributions.Normal(self.mu, self.sigma2)
   mix1 = tf.reduce_sum(n1.log_prob(x), -1) + tf.log(self.pi)
   mix2 = tf.reduce_sum(n2.log_prob(x), -1) + tf.log(np.float32(1.0 - self.pi))
   prior_mix = tf.stack([mix1, mix2])
   lse_mix = tf.reduce_logsumexp(prior_mix, [0])
   return tf.reduce_sum(lse_mix)
Developer: geniusjiqing, Project: sonnet, Lines: 8, Source: brnn_ptb.py


Example 11: eval_func

 def eval_func(f):
     feval = f(*Xs, **Ys)  # f should be elementwise: return shape N x H**Din
     if logspace:
         log_gh_w = np.log(gh_w.reshape(1, -1))
         result = tf.reduce_logsumexp(feval + log_gh_w, axis=1)
     else:
         result = tf.matmul(feval, gh_w.reshape(-1, 1))
     return tf.reshape(result, shape)
Developer: sanket-kamthe, Project: GPflow, Lines: 8, Source: quadrature.py


Example 12: reduce_logmeanexp

def reduce_logmeanexp(input_tensor, axis=None, keep_dims=False):
  logsumexp = tf.reduce_logsumexp(input_tensor, axis, keep_dims)
  input_tensor = tf.convert_to_tensor(input_tensor)
  n = input_tensor.shape.as_list()
  if axis is None:
    n = tf.cast(tf.reduce_prod(n), logsumexp.dtype)
  else:
    n = tf.cast(tf.reduce_prod(n[axis]), logsumexp.dtype)

  return -tf.log(n) + logsumexp
Developer: JoyceYa, Project: edward, Lines: 10, Source: iwvi.py
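A quick usage sketch of the helper above (assuming a TF 1.x session, consistent with the tf.log call in the snippet; the input values are illustrative). Log-mean-exp is just logsumexp shifted down by log(n):

import tensorflow as tf

log_w = tf.log(tf.constant([1.0, 2.0, 3.0]))
out = reduce_logmeanexp(log_w)  # the helper defined above
with tf.Session() as sess:
    print(sess.run(out))  # log((1 + 2 + 3) / 3) = log(2) ~= 0.6931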


Example 13: neg_log_likelihood

 def neg_log_likelihood(state):
   state_ext = tf.expand_dims(state, 0)
   linear_part = tf.matmul(state_ext, x_data)
   linear_part_ex = tf.stack([tf.zeros_like(linear_part),
                              linear_part], axis=0)
   term1 = tf.squeeze(tf.matmul(
       tf.reduce_logsumexp(linear_part_ex, axis=0), y_data), -1)
   term2 = (0.5 * tf.reduce_sum(state_ext * state_ext, -1) -
            tf.reduce_sum(linear_part, -1))
    return tf.squeeze(term1 + term2)
Developer: asudomoeva, Project: probability, Lines: 10, Source: bfgs_test.py


Example 14: get_KL_divergence_Sample

def get_KL_divergence_Sample(shape, mu, sigma, prior, Z):
    
    """
    Compute a Monte Carlo sample of the KL divergence between posterior and prior.
    Instead of computing the exact KL distance between the prior and the
    variational posterior of the weights, we just evaluate its integrand at the
    specific sampled weight values W.

    In this case:
        - Posterior: multivariate independent Gaussian.
        - Prior: mixture model.

    The sample of the KL is:
        KL_sample = log(q(W|theta)) - log(p(W|theta_0)) where
        p(theta) = pi*N(0,sigma1) + (1-pi)*N(0,sigma2)

    Input:
        shape = shape of the sample we want to compute the KL of
        mu = the mu variable used when sampling
        sigma = the sigma variable used when sampling
        prior = the prior object with parameters
        Z = the sampled weight values (the hidden variables)
    
    """
    
    # Flatten the hidden variables (weights)
    Z = tf.reshape(Z, [-1])
    
    #Get the log probability distribution of your sampled variable
    
    # Distribution of the Variational Posterior
    VB_distribution = Normal(mu, sigma)
    # Distribution of the Gaussian Components of the prior
    prior_1_distribution = Normal(0.0, prior.sigma1)
    prior_2_distribution = Normal(0.0, prior.sigma2)
    
    # Now we compute the log likelihood of those Hidden variables for their
    # prior and posterior.
    
    #get: sum( log[ q( theta | mu, sigma ) ] )
    q_ll = tf.reduce_sum(VB_distribution.log_prob(Z))
    
    #get: sum( log[ p( theta ) ] ) for mixture prior
    mix1 = tf.reduce_sum(prior_1_distribution.log_prob(Z)) + tf.log(prior.pi_mix)
    mix2 = tf.reduce_sum(prior_2_distribution.log_prob(Z)) + tf.log(1.0 - prior.pi_mix)
    p_ll = tf.reduce_logsumexp([mix1,mix2])
    
    # Compute the KL sample as the subtraction of both log likelihoods
    KL = q_ll - p_ll
    
    return KL
Developer: manuwhs, Project: Trapyng, Lines: 53, Source: Variational_inferences_lib_tf.py


Example 15: _log_cdf

 def _log_cdf(self, x):
   with tf.control_dependencies(self._assertions):
     x = tf.convert_to_tensor(x, name="x")
     distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
     cat_log_probs = self._cat_probs(log_probs=True)
     final_log_cdfs = [
         cat_lp + d_lcdf
         for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
     ]
     concatted_log_cdfs = tf.stack(final_log_cdfs, axis=0)
     mixture_log_cdf = tf.reduce_logsumexp(concatted_log_cdfs, [0])
     return mixture_log_cdf
Developer: lewisKit, Project: probability, Lines: 12, Source: mixture.py


Example 16: _log_prob

 def _log_prob(self, x):
   with tf.control_dependencies(self._assertions):
     x = tf.convert_to_tensor(x, name="x")
     distribution_log_probs = [d.log_prob(x) for d in self.components]
     cat_log_probs = self._cat_probs(log_probs=True)
     final_log_probs = [
         cat_lp + d_lp
         for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
     ]
     concat_log_probs = tf.stack(final_log_probs, 0)
     log_sum_exp = tf.reduce_logsumexp(concat_log_probs, [0])
     return log_sum_exp
Developer: lewisKit, Project: probability, Lines: 12, Source: mixture.py


Example 17: marginal_log_prob

  def marginal_log_prob(self, x, **kwargs):
    'The marginal log probability of the observed variable. Sums out `cat`.'
    batch_event_rank = self.event_shape.ndims + self.batch_shape.ndims
    # expand x to broadcast log probs over num_components dimension
    expanded_x = tf.expand_dims(x, -1 - batch_event_rank)
    log_probs = self.components.log_prob(expanded_x)

    p_ndims = self.cat.probs.shape.ndims
    perm = tf.concat([[p_ndims - 1], tf.range(p_ndims - 1)], 0)
    transposed_p = tf.transpose(self.cat.probs, perm)

    return tf.reduce_logsumexp(log_probs + tf.log(transposed_p),
                               -1 - batch_event_rank)
Developer: ekostem, Project: edward, Lines: 13, Source: param_mixture.py


Example 18: _forward_log_det_jacobian

 def _forward_log_det_jacobian(self, x):
   # This code is similar to tf.nn.log_softmax but different because we have
   # an implicit zero column to handle. I.e., instead of:
   #   reduce_sum(logits - reduce_sum(exp(logits), dim))
   # we must do:
   #   log_normalization = 1 + reduce_sum(exp(logits))
   #   -log_normalization + reduce_sum(logits - log_normalization)
   log_normalization = tf.nn.softplus(
       tf.reduce_logsumexp(x, axis=-1, keep_dims=True))
   return tf.squeeze(
       (-log_normalization + tf.reduce_sum(
           x - log_normalization, axis=-1, keepdims=True)),
       axis=-1)
Developer: asudomoeva, Project: probability, Lines: 13, Source: softmax_centered.py
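The softplus-of-logsumexp line is the normalizer with the implicit zero logit included: since softplus(a) = log(1 + e^a),

    \log Z = \mathrm{softplus}\big(\mathrm{LSE}(x)\big) = \log\Big(1 + \sum_i e^{x_i}\Big)

which matches the log_normalization described in the code comment.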


Example 19: while_step

  def while_step(t, state, tas, log_weights_acc, log_z_hat_acc):
    """Implements one timestep of the particle filter."""
    particle_state, loop_state = state
    cur_mask = nested.read_tas(mask_ta, t)
    # Propagate the particles one step.
    log_alpha, new_particle_state, loop_args = transition(particle_state, t)
    # Update the current weights with the incremental weights.
    log_alpha *= cur_mask
    log_alpha = tf.reshape(log_alpha, [num_particles, batch_size])
    log_weights_acc += log_alpha

    should_resample = resampling_criterion(log_weights_acc, t)

    if resampling_criterion == never_resample_criterion:
      resampled = tf.to_float(should_resample)
    else:
      # Compute the states as if we did resample.
      resampled_states = resampling_fn(
          log_weights_acc,
          new_particle_state,
          num_particles,
          batch_size)
      # Decide whether or not we should resample; don't resample if we are past
      # the end of a sequence.
      should_resample = tf.logical_and(should_resample,
                                       cur_mask[:batch_size] > 0.)
      float_should_resample = tf.to_float(should_resample)
      new_particle_state = nested.where_tensors(
          tf.tile(should_resample, [num_particles]),
          resampled_states,
          new_particle_state)
      resampled = float_should_resample

    new_loop_state = loop_fn(loop_state, loop_args, new_particle_state,
                             log_weights_acc, resampled, cur_mask, t)
    # Update log Z hat.
    log_z_hat_update = tf.reduce_logsumexp(
        log_weights_acc, axis=0) - tf.log(tf.to_float(num_particles))
    # If it is the last timestep, always add the update.
    log_z_hat_acc += tf.cond(t < max_num_steps - 1,
                             lambda: log_z_hat_update * resampled,
                             lambda: log_z_hat_update)
    # Update the TensorArrays before we reset the weights so that we capture
    # the incremental weights and not zeros.
    ta_updates = [log_weights_acc, resampled]
    new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]
    # For the particle filters that resampled, reset weights to zero.
    log_weights_acc *= (1. - tf.tile(resampled[tf.newaxis, :],
                                     [num_particles, 1]))
    new_state = (new_particle_state, new_loop_state)
    return t + 1, new_state, new_tas, log_weights_acc, log_z_hat_acc
Developer: 812864539, Project: models, Lines: 51, Source: smc.py


Example 20: _log_prob

  def _log_prob(self, y):
    # For caching to work, it is imperative that the bijector is the first to
    # modify the input.
    x = self.bijector.inverse(y)
    event_ndims = self._maybe_get_static_event_ndims()

    ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
    if self.bijector._is_injective:  # pylint: disable=protected-access
      return self._finish_log_prob_for_one_fiber(y, x, ildj, event_ndims)

    lp_on_fibers = [
        self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
        for x_i, ildj_i in zip(x, ildj)]
    return tf.reduce_logsumexp(tf.stack(lp_on_fibers), axis=0)
Developer: asudomoeva, Project: probability, Lines: 14, Source: transformed_distribution.py
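The final logsumexp here implements the change-of-variables formula for a non-injective bijector, summing the density contributions over the preimage fibers x_i of y (with ILDJ_i the inverse log det Jacobian on each fiber):

    \log p_Y(y) = \mathrm{logsumexp}_i\big(\log p_X(x_i) + \mathrm{ILDJ}_i\big)
    = \log \sum_i p_X(x_i)\,e^{\mathrm{ILDJ}_i}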



Note: The tensorflow.reduce_logsumexp examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.

