Python tensorflow.reduce_prod Function Code Examples


This article collects typical usage examples of Python's tensorflow.reduce_prod function. If you have been wondering what exactly reduce_prod does, how to call it, or what it looks like in real code, the curated examples below should help.



Below are 20 code examples of the reduce_prod function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
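Before the examples, here is a minimal sketch of what tf.reduce_prod does, using the TF 1.x-era API that most of the snippets below assume: it multiplies a tensor's elements together along the given axes, optionally keeping the reduced dimensions.

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])

all_prod = tf.reduce_prod(x)                     # 720.0, the product of every element
col_prod = tf.reduce_prod(x, 0)                  # [4., 10., 18.], product down each column
row_prod = tf.reduce_prod(x, 1, keep_dims=True)  # [[6.], [120.]], product across each row

with tf.Session() as sess:
    print(sess.run([all_prod, col_prod, row_prod]))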

Example 1: U_t_variance

def U_t_variance(timestep_outputs_matrix, total_timesteps, gamma = 5):

	# tf.op_scope expects a list of tensors as its first argument, not a sum.
	with tf.op_scope([timestep_outputs_matrix], "U_t_variance"):

		G_i_matrix = G_i_piecewise_variance(timestep_outputs_matrix, total_timesteps)
		# Assumption: the original snippet is truncated here (tf.mul had no second
		# argument and the product's input was undefined); presumably the outputs
		# are weighted by G_i_matrix before taking the product over timesteps.
		timestep_outputs_matrix_with_g = tf.mul(timestep_outputs_matrix, G_i_matrix)
		return tf.reduce_prod(timestep_outputs_matrix_with_g)
Developer: dmakian | Project: TwitchRNNBot | Lines: 7 | Source: decoding_enhanced.py


Example 2: calculate_reshape

def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Developer: lewisKit | Project: probability | Lines: 29 | Source: batch_reshape.py
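The -1 inference above leans on reduce_prod: the product of the proposed shape (with the -1 still in it) is the negated product of the known dimensions, so integer division by its negation recovers the implicit size. A NumPy sketch of the same arithmetic, with hypothetical shapes not taken from the original code:

import numpy as np

original_shape = np.array([2, 3, 4])   # 24 elements in total
new_shape = np.array([4, -1])

original_size = np.prod(original_shape)                           # 24
size_implicit_dim = original_size // max(1, -np.prod(new_shape))  # 24 // 4 = 6
expanded_new_shape = np.where(new_shape == -1, size_implicit_dim, new_shape)
print(expanded_new_shape)  # [4 6]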


Example 3: build_psi_stats_rbf_plus_linear

def build_psi_stats_rbf_plus_linear(Z, kern, mu, S):
    # TODO: make sure the active dimensions are overlapping completely

    # use only active dimensions
    mu, S = kern._slice(mu, S)  # only use the active dimensions.
    Z, _ = kern._slice(Z, None)

    psi0_lin, psi1_lin, psi2_lin = build_psi_stats_linear(Z, kern.linear, mu, S)
    psi0_rbf, psi1_rbf, psi2_rbf = build_psi_stats_rbf(Z, kern.rbf, mu, S)
    psi0, psi1, psi2 = psi0_lin + psi0_rbf, psi1_lin + psi1_rbf, psi2_lin + psi2_rbf

    # extra terms for the 'interaction' of linear and rbf
    l2 = tf.square(kern.rbf.lengthscales)
    A = tf.expand_dims(1./S + 1./l2, 1)  # N x 1 x Q
    m = (tf.expand_dims(mu/S, 1) + tf.expand_dims(Z/l2, 0)) / A  # N x M x Q
    mTAZ = tf.reduce_sum(tf.expand_dims(m * kern.linear.variance, 1) *
                         tf.expand_dims(tf.expand_dims(Z, 0), 0), 3)  # N x M x M
    Z2 = tf.reduce_sum(tf.square(Z) / l2, 1)  # M,
    mu2 = tf.reduce_sum(tf.square(mu) / S, 1)  # N
    mAm = tf.reduce_sum(tf.square(m) * A, 2)  # N x M
    exp_term = tf.exp(-(tf.reshape(Z2, (1, -1)) + tf.reshape(mu2, (-1, 1))-mAm) / 2.)  # N x M
    psi2_extra = tf.reduce_sum(kern.rbf.variance *
                               tf.expand_dims(exp_term, 2) *
                               tf.expand_dims(tf.expand_dims(tf.reduce_prod(S, 1), 1), 2) *
                               tf.expand_dims(tf.reduce_prod(A, 2), 1) *
                               mTAZ, 0)

    psi2 = psi2 + psi2_extra + tf.transpose(psi2_extra)
    return psi0, psi1, psi2
Developer: blutooth | Project: dgp | Lines: 29 | Source: kernel_expectations.py


Example 4: testGradient

  def testGradient(self):
    s = [2, 3, 4, 2]
    # NOTE(kearnes): divide by 20 so product is a reasonable size
    x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
    with self.test_session():
      t = tf.convert_to_tensor(x)

      su = tf.reduce_prod(t, [])
      jacob_t, jacob_n = gradient_checker.ComputeGradient(
          t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

      su = tf.reduce_prod(t, [1, 2])
      jacob_t, jacob_n = gradient_checker.ComputeGradient(
          t, s, su, [2, 2], x_init_value=x, delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

      su = tf.reduce_prod(t, [0, 1, 2, 3])
      jacob_t, jacob_n = gradient_checker.ComputeGradient(
          t, s, su, [1], x_init_value=x, delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

    # NOTE(kearnes): the current gradient calculation gives NaNs for 0 inputs
    x = np.arange(0.0, 48.0).reshape(s).astype(np.float32) / 20.
    with self.test_session():
      t = tf.convert_to_tensor(x)
      su = tf.reduce_prod(t, [])
      jacob_t, _ = gradient_checker.ComputeGradient(
          t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
      with self.assertRaisesOpError("Tensor had NaN values"):
        tf.check_numerics(jacob_t, message="_ProdGrad NaN test").op.run()
Developer: debaratidas1994 | Project: tensorflow | Lines: 31 | Source: reduction_ops_test.py


Example 5: tf_bivariate_normal

def tf_bivariate_normal(y, mu, sigma, rho, n_mixtures, batch_size):
    mu = tf.verify_tensor_all_finite(mu, "Mu not finite!")
    y = tf.verify_tensor_all_finite(y, "Y not finite!")
    delta = tf.sub(tf.tile(tf.expand_dims(y, 1), [1, n_mixtures, 1]), mu)
    delta = tf.verify_tensor_all_finite(delta, "Delta not finite!")
    sigma = tf.verify_tensor_all_finite(sigma, "Sigma not finite!")
    s = tf.reduce_prod(sigma, 2)
    s = tf.verify_tensor_all_finite(s, "S not finite!")
    # -1 <= rho <= 1
    z = tf.reduce_sum(tf.square(tf.div(delta, sigma + epsilon) + epsilon), 2) - \
        2 * tf.div(tf.mul(rho, tf.reduce_prod(delta, 2)), s + epsilon)
    
    z = tf.verify_tensor_all_finite(z, "Z not finite!")
    # 0 < negRho <= 1
    rho = tf.verify_tensor_all_finite(rho, "rho in bivariate normal not finite!")
    negRho = tf.clip_by_value(1 - tf.square(rho), epsilon, 1.0)
    negRho = tf.verify_tensor_all_finite(negRho, "negRho not finite!")
    # Note that if negRho goes near zero, or z goes really large, this explodes.
    negRho = tf.verify_tensor_all_finite(negRho, "negRho in bivariate normal not finite!")
    
    result = tf.clip_by_value(tf.exp(tf.div(-z, 2 * negRho)), 1.0e-8, 1.0e8)
    result = tf.verify_tensor_all_finite(result, "Result in bivariate normal not finite!")
    denom = 2 * np.pi * tf.mul(s, tf.sqrt(negRho))
    denom = tf.verify_tensor_all_finite(denom, "Denom in bivariate normal not finite!")
    result = tf.clip_by_value(tf.div(result, denom + epsilon), epsilon, 1.0)
    result = tf.verify_tensor_all_finite(result, "Result2 in bivariate normal not finite!")
    return result, delta
Developer: cybercom-finland | Project: location_tracking_ml | Lines: 27 | Source: model.py


Example 6: disjunction_of_literals

def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1,list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0-tf.reduce_prod(1.0-literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print "data aggregator is lukas"
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print "data aggregator is mean"
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print "data aggregator is hmean"
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print "data aggregator is min"
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print "data aggregator is qmean"
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print "data aggregator is cmean"
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
Developer: ivanDonadello | Project: knowPic | Lines: 33 | Source: logictensornetworks.py


Example 7: _compareGradient

  def _compareGradient(self, x):
    with self.test_session():
      t = tf.convert_to_tensor(x)

      su = tf.reduce_prod(t, [])
      jacob_t, jacob_n = tf.test.compute_gradient(t,
                                                  x.shape,
                                                  su,
                                                  [2, 3, 4, 2],
                                                  x_init_value=x,
                                                  delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

      su = tf.reduce_prod(t, [1, 2])
      jacob_t, jacob_n = tf.test.compute_gradient(t,
                                                  x.shape,
                                                  su,
                                                  [2, 2],
                                                  x_init_value=x,
                                                  delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

      su = tf.reduce_prod(t, [0, 1, 2, 3])
      jacob_t, jacob_n = tf.test.compute_gradient(t,
                                                  x.shape,
                                                  su,
                                                  [1],
                                                  x_init_value=x,
                                                  delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
Developer: 2020zyc | Project: tensorflow | Lines: 30 | Source: reduction_ops_test.py


Example 8: test_docstring_example

  def test_docstring_example(self):
    # Produce the first 1000 members of the Halton sequence in 3 dimensions.
    num_results = 1000
    dim = 3
    with self.test_session():
      sample = tfp.mcmc.sample_halton_sequence(
          dim, num_results=num_results, randomized=False)

      # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
      # hypercube.
      powers = tf.range(1., limit=dim + 1)
      integral = tf.reduce_mean(
          tf.reduce_prod(sample ** powers, axis=-1))
      true_value = 1. / tf.reduce_prod(powers + 1.)

      # Produces a relative absolute error of 1.7%.
      self.assertAllClose(integral.eval(), true_value.eval(), rtol=0.02)

      # Now skip the first 1000 samples and recompute the integral with the next
      # thousand samples. The sequence_indices argument can be used to do this.

      sequence_indices = tf.range(start=1000, limit=1000 + num_results,
                                  dtype=tf.int32)
      sample_leaped = tfp.mcmc.sample_halton_sequence(
          dim, sequence_indices=sequence_indices, randomized=False)

      integral_leaped = tf.reduce_mean(
          tf.reduce_prod(sample_leaped ** powers, axis=-1))
      self.assertAllClose(integral_leaped.eval(), true_value.eval(), rtol=0.05)
Developer: lewisKit | Project: probability | Lines: 29 | Source: sample_halton_sequence_test.py
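The closed-form reference value follows from integrating each coordinate independently, which is exactly what 1. / tf.reduce_prod(powers + 1.) computes; as a quick arithmetic check:

# Integral of x1 * x2**2 * x3**3 over [0, 1]^3:
# (1/(1+1)) * (1/(2+1)) * (1/(3+1)) = 1/24
true_value = 1.0 / ((1 + 1) * (2 + 1) * (3 + 1))
print(true_value)  # 0.041666...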


Example 9: f_iou_box

def f_iou_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
    """Computes IoU of boxes.

    Args:
        top_left_a: [B, T, 2] or [B, 2]
        bot_right_a: [B, T, 2] or [B, 2]
        top_left_b: [B, T, 2] or [B, 2]
        bot_right_b: [B, T, 2] or [B, 2]

    Returns:
        iou: [B, T]
    """
    inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
    inter_area = tf.maximum(inter_area, 1e-6)
    ndims = tf.shape(tf.shape(top_left_a))
    # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
    area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
    check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
    area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
    union_area = (area_a + area_b - inter_area + 1e-5)
    union_area = tf.maximum(union_area, 1e-5)
    iou = inter_area / union_area
    iou = tf.maximum(iou, 1e-5)
    iou = tf.minimum(iou, 1.0)

    return iou
Developer: lrjconan | Project: img-count | Lines: 28 | Source: ris_model_base.py
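The check_a/check_b products above zero out degenerate boxes: tf.to_float(top_left < bot_right) is 1.0 per coordinate only when the box has positive extent, so reduce_prod over the last axis yields 1.0 for valid boxes and 0.0 otherwise. A NumPy sketch of that validity mask, with hypothetical box coordinates:

import numpy as np

top_left = np.array([[0., 0.], [5., 5.]])   # two boxes, shape [B, 2]
bot_right = np.array([[2., 3.], [5., 4.]])  # the second box is degenerate

check = np.prod((top_left < bot_right).astype(np.float32), axis=-1)
area = check * np.prod(bot_right - top_left, axis=-1)
print(check)  # [1. 0.]
print(area)   # [6. -0.], only the valid box contributes area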


Example 10: logpdf

    def logpdf(self, x, mean=None, cov=1):
        """Log of the probability density function.

        Parameters
        ----------
        x : tf.Tensor
            A 1-D or 2-D tensor.
        mean : tf.Tensor, optional
            A 1-D tensor. Defaults to zero mean.
        cov : tf.Tensor, optional
            A 1-D or 2-D tensor. Defaults to identity matrix.

        Returns
        -------
        tf.Tensor
            A tensor of one dimension less than the input.
        """
        x = tf.cast(x, dtype=tf.float32)
        x_shape = get_dims(x)
        if len(x_shape) == 1:
            d = x_shape[0]
        else:
            d = x_shape[1]

        if mean is None:
            r = x
        else:
            mean = tf.cast(mean, dtype=tf.float32)
            r = x - mean

        if cov is 1:
            L_inv = tf.diag(tf.ones([d]))
            det_cov = tf.constant(1.0)
        else:
            cov = tf.cast(cov, dtype=tf.float32)
            if len(cov.get_shape()) == 1: # vector
                L_inv = tf.diag(1.0 / tf.sqrt(cov))
                det_cov = tf.reduce_prod(cov)
            else: # matrix
                L = tf.cholesky(cov)
                L_inv = tf.matrix_inverse(L)
                det_cov = tf.pow(tf.reduce_prod(tf.diag_part(L)), 2)

        lps = -0.5*d*tf.log(2*np.pi) - 0.5*tf.log(det_cov)
        if len(x_shape) == 1: # vector
            r = tf.reshape(r, shape=(d, 1))
            inner = tf.matmul(L_inv, r)
            lps -= 0.5 * tf.matmul(inner, inner, transpose_a=True)
            return tf.squeeze(lps)
        else: # matrix
            # TODO vectorize further
            out = []
            for r_vec in tf.unpack(r):
                r_vec = tf.reshape(r_vec, shape=(d, 1))
                inner = tf.matmul(L_inv, r_vec)
                out += [tf.squeeze(lps -
                        0.5 * tf.matmul(inner, inner, transpose_a=True))]

            return tf.pack(out)
Developer: TalkingData | Project: edward | Lines: 59 | Source: distributions.py
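The matrix branch above computes the determinant through the Cholesky factor, using the identity det(cov) = (prod(diag(L)))**2, which is what tf.pow(tf.reduce_prod(tf.diag_part(L)), 2) implements. A quick NumPy verification with a hypothetical covariance:

import numpy as np

cov = np.array([[2.0, 0.5],
                [0.5, 1.0]])
L = np.linalg.cholesky(cov)

det_via_chol = np.prod(np.diag(L)) ** 2
print(det_via_chol, np.linalg.det(cov))  # both are 1.75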


Example 11: _variance

  def _variance(self):
    with tf.control_dependencies(self._runtime_assertions):
      probs = self._marginal_hidden_probs()
      # probs :: num_steps batch_shape num_states
      means = self._observation_distribution.mean()
      # means :: observation_batch_shape[:-1] num_states
      #          observation_event_shape
      means_shape = tf.concat(
          [self.batch_shape_tensor(),
           [self._num_states],
           self._observation_distribution.event_shape_tensor()],
          axis=0)
      means = tf.broadcast_to(means, means_shape)
      # means :: batch_shape num_states observation_event_shape

      observation_event_shape = (
          self._observation_distribution.event_shape_tensor())
      batch_size = tf.reduce_prod(self.batch_shape_tensor())
      flat_probs_shape = [self._num_steps, batch_size, self._num_states]
      flat_means_shape = [
          batch_size,
          1,
          self._num_states,
          tf.reduce_prod(observation_event_shape)]

      flat_probs = tf.reshape(probs, flat_probs_shape)
      # flat_probs :: num_steps batch_size num_states
      flat_means = tf.reshape(means, flat_means_shape)
      # flat_means :: batch_size 1 num_states observation_event_size
      flat_mean = tf.einsum("ijk,jmkl->jiml", flat_probs, flat_means)
      # flat_mean :: batch_size num_steps 1 observation_event_size

      variances = self._observation_distribution.variance()
      variances = tf.broadcast_to(variances, means_shape)
      # variances :: batch_shape num_states observation_event_shape
      flat_variances = tf.reshape(variances, flat_means_shape)
      # flat_variances :: batch_size 1 num_states observation_event_size

      # For a mixture of n distributions with mixture probabilities
      # p[i], and where the individual distributions have means and
      # variances given by mean[i] and var[i], the variance of
      # the mixture is given by:
      #
      # var = sum i=1..n p[i] * ((mean[i] - mean)**2 + var[i])

      flat_variance = tf.einsum("ijk,jikl->jil",
                                flat_probs,
                                (flat_means - flat_mean)**2 + flat_variances)
      # flat_variance :: batch_size num_steps observation_event_size

      unflat_mean_shape = tf.concat(
          [self.batch_shape_tensor(),
           [self._num_steps],
           observation_event_shape],
          axis=0)

      # returns :: batch_shape num_steps observation_event_shape
      return tf.reshape(flat_variance, unflat_mean_shape)
Developer: asudomoeva | Project: probability | Lines: 58 | Source: hidden_markov_model.py
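A scalar sanity check of the mixture-variance formula used above, with a hypothetical two-component mixture:

import numpy as np

p = np.array([0.3, 0.7])      # mixture probabilities
mean = np.array([0.0, 2.0])   # component means
var = np.array([1.0, 0.5])    # component variances

mix_mean = np.sum(p * mean)                          # 1.4
mix_var = np.sum(p * ((mean - mix_mean)**2 + var))   # 0.3*(1.96+1) + 0.7*(0.36+0.5) = 1.49
print(mix_mean, mix_var)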


Example 12: reduce_logmeanexp

def reduce_logmeanexp(input_tensor, axis=None, keep_dims=False):
  logsumexp = tf.reduce_logsumexp(input_tensor, axis, keep_dims)
  input_tensor = tf.convert_to_tensor(input_tensor)
  n = input_tensor.shape.as_list()
  if axis is None:
    n = tf.cast(tf.reduce_prod(n), logsumexp.dtype)
  else:
    n = tf.cast(tf.reduce_prod(n[axis]), logsumexp.dtype)

  return -tf.log(n) + logsumexp
Developer: JoyceYa | Project: edward | Lines: 10 | Source: iwvi.py
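reduce_logmeanexp is logsumexp minus log(n), where n, the number of elements being averaged, comes from reduce_prod over the static shape. A small usage sketch under the same TF 1.x assumptions, with the function above in scope:

import numpy as np
import tensorflow as tf

x = tf.constant(np.log([1., 2., 3., 4.]), dtype=tf.float32)
lme = reduce_logmeanexp(x)  # log(mean([1, 2, 3, 4])) = log(2.5)

with tf.Session() as sess:
    print(sess.run(lme), np.log(2.5))  # both are about 0.9163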


Example 13: get_prediction

    def get_prediction(self,items,v_arr,M):
        """ items contains either a list of item or a single item
        for instance items = [0,1,2] or items = 0
        when predicting multiple value for the same user, it is more efficient 
        to use this function with a list of some values in items instead of 
        calling this function multiple times since it prevent from computing 
        the same V for each rating prediction        
        """
        V = createV(v_arr,M,self.m, self.K)
        V = tf.sparse_tensor_to_dense(V)
        
        den = 0
        # There are two terms in the exponential that are common to each k
        Exp_c = self.h_bias
        for l in range(self.K):
            Exp_c = tf.add(Exp_c,tf.matmul(tf.reshape(V[:,l],(1,self.m)),self.weights[l,:,:]))
        
        if (type(items)==list) | (type(items)==np.ndarray) | (type(items)==range):
            R = [] 
            if (type(items)!=range):
                items = list(map(int,items)) # it must be int and not np.int32
                
            for item in items:
                den = 0  # reset the denominator per item (assumption: accumulating it across items was unintended)

                Gamma = tf.exp(tf.mul(V[item,:],self.v_bias[item,:]))
                
                R_tmp = 0
                for k in range(self.K):
                    tmp = tf.add(tf.exp(-Exp_c),tf.exp(V[item,k]*self.weights[k,item,:]))
                    tmp = tf.reduce_prod(tmp)
                    tmp = tf.reduce_prod(tmp)
                    tmp = Gamma[k]*tmp
                    den = den + tmp
                    R_tmp = R_tmp + (k+1)*tmp
                R_tmp = R_tmp/den
                R.extend([R_tmp])
                
        elif type(items)==int:
            R = 0
            Gamma = tf.exp(tf.mul(V[items,:],self.v_bias[items,:]))

            for k in range(self.K):
                tmp = tf.add(tf.exp(-Exp_c),tf.exp(V[items,k]*self.weights[k,items,:]))
                tmp = tf.reduce_prod(tmp)
                tmp = tf.reduce_prod(tmp)
                tmp = Gamma[k]*tmp
                den = den + tmp
                R = R + (k+1)*tmp
            R = R/den
        else:
            print('type error')
            
        return R
Developer: RomainWarlop | Project: RecSys-Learn | Lines: 53 | Source: rbmcf.py


Example 14: embedding_lookup

def embedding_lookup(params, ids, name="embedding_lookup"):
    """Provides a N dimensional version of tf.embedding_lookup.

    Ids are flattened to a 1d tensor before being passed to embedding_lookup
    then, they are unflattend to match the original ids shape plus an extra
    leading dimension of the size of the embeddings.

    Args:
        params: List of tensors of size D0 x D1 x ... x Dn-2 x Dn-1.
        ids: N-dimensional tensor of B0 x B1 x .. x Bn-2 x Bn-1.
             Must contain indexes into params.
        name: Optional name for the op.

    Returns:
        A tensor of size B0 x B1 x .. x Bn-2 x Bn-1 x D1 x ... x Dn-2 x Dn-1 containing the values from
        the params tensor(s) for indices in ids.

    Raises:
        ValueError: if some parameters are invalid.
    """
    with tf.op_scope([params, ids], name, "embedding_lookup"):
        params = tf.convert_to_tensor(params)
        ids = tf.convert_to_tensor(ids)
        shape = tf.shape(ids)
        ids_flat = tf.reshape(ids, tf.reduce_prod(shape, keep_dims=True))
        embeds_flat = tf.nn.embedding_lookup(params, ids_flat, name)
        embed_shape = tf.concat(0, [shape, [-1]])
        embeds = tf.reshape(embeds_flat, embed_shape)
        embeds.set_shape(ids.get_shape().concatenate(params.get_shape()[1:]))
        return embeds
Developer: R-CodeBoy | Project: skflow | Lines: 30 | Source: ops.py
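A usage sketch under the same TF 0.x-era assumptions (hypothetical params and ids, with the embedding_lookup function above in scope): a [5, 2] embedding table looked up with a [2, 2] index batch yields a [2, 2, 2] result.

import numpy as np
import tensorflow as tf

params = tf.constant(np.arange(10.).reshape(5, 2))  # 5 embeddings of dimension 2
ids = tf.constant([[0, 1], [3, 4]])                 # a 2x2 batch of indices

embeds = embedding_lookup(params, ids)              # result has shape [2, 2, 2]
with tf.Session() as sess:
    print(sess.run(embeds))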


Example 15: _expectation

def _expectation(p, kern, feat, none1, none2, nghp=None):
    """
    Compute the expectation:
    <K_{X, Z}>_p(X)
        - K_{.,.} :: RBF kernel

    :return: NxM
    """
    with params_as_tensors_for(kern), params_as_tensors_for(feat):
        # use only active dimensions
        Xcov = kern._slice_cov(p.cov)
        Z, Xmu = kern._slice(feat.Z, p.mu)
        D = tf.shape(Xmu)[1]
        if kern.ARD:
            lengthscales = kern.lengthscales
        else:
            lengthscales = tf.zeros((D,), dtype=settings.tf_float) + kern.lengthscales

        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov)  # NxDxD

        all_diffs = tf.transpose(Z) - tf.expand_dims(Xmu, 2)  # NxDxM
        exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True)  # NxDxM
        exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM

        sqrt_det_L = tf.reduce_prod(lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov  # N

        return kern.variance * (determinants[:, None] * exponent_mahalanobis)
Developer: vincentadam87 | Project: GPflow | Lines: 30 | Source: expectations.py


Example 16: ut

def ut(u, f, wr, ww):
    """
    returns the updated usage vector given the previous one along with
    free gates and previous read and write weightings
    """
    psi_t = tf.reduce_prod(1 - f * wr, 1)
    return (u + ww - u * ww) * psi_t
Developer: darksigma | Project: Fundamentals-of-Deep-Learning-Book | Lines: 7 | Source: mem_ops.py
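Here reduce_prod implements the DNC memory-retention vector psi_t = prod_r (1 - f_r * wr_r): a location remains in use only if no read head is simultaneously free to release it and reading from it. A NumPy sketch with hypothetical [batch, R, N] shapes and values:

import numpy as np

f = np.array([[[1.0], [0.0]]])       # [1, 2, 1]: head 1 may free memory, head 2 may not
wr = np.array([[[0.9, 0.1, 0.0],     # head 1 mostly read location 0
                [0.0, 0.2, 0.8]]])   # head 2 mostly read location 2
psi = np.prod(1 - f * wr, axis=1)    # [batch, N]
print(psi)  # [[0.1 0.9 1.]], location 0 is largely released, location 2 retained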


Example 17: entropy

    def entropy(self, mean=None, cov=1):
        """
        Note entropy does not depend on its mean.

        Arguments
        ----------
        mean: tf.Tensor, optional
            vector. Defaults to zero mean.
        cov: tf.Tensor, optional
            vector or matrix. Defaults to identity.

        Returns
        -------
        tf.Tensor
            scalar
        """
        if cov == 1:
            d = 1
            det_cov = 1.0
        else:
            cov = tf.cast(cov, dtype=tf.float32)
            d = get_dims(cov)[0]
            if len(cov.get_shape()) == 1: 
                det_cov = tf.reduce_prod(cov)
            else:
                det_cov = tf.matrix_determinant(cov)

        return 0.5 * (d + d*tf.log(2*np.pi) + tf.log(det_cov))
Developer: bakersfieldag | Project: edward | Lines: 28 | Source: distributions.py
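For a diagonal covariance the reduce_prod call gives det(cov) directly, and the returned value is the standard Gaussian differential entropy 0.5 * (d + d*log(2*pi) + log(det_cov)). A NumPy cross-check with a hypothetical diagonal covariance:

import numpy as np

cov = np.array([0.5, 2.0])  # diagonal covariance entries
d = cov.size
det_cov = np.prod(cov)      # 1.0

entropy = 0.5 * (d + d * np.log(2 * np.pi) + np.log(det_cov))
print(entropy)  # about 2.8379, i.e. (d/2) * log(2*pi*e) since det_cov = 1 here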


Example 18: prob_is_largest

    def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
        # work out what the mean and variance is of the indicated latent function.
        oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1.0, 0.0), float_type)
        mu_selected = tf.reduce_sum(oh_on * mu, 1)
        var_selected = tf.reduce_sum(oh_on * var, 1)

        # generate Gauss Hermite grid
        X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
            tf.sqrt(tf.clip_by_value(2.0 * var_selected, 1e-10, np.inf)), (-1, 1)
        )

        # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
        dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
            tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2
        )
        cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))

        cdfs = cdfs * (1 - 2e-4) + 1e-4

        # blank out all the distances on the selected latent function
        oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0.0, 1.0), float_type)
        cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)

        # take the product over the latent functions, and the sum over the GH grid.
        return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
Developer: GPflow | Project: GPflow | Lines: 25 | Source: likelihoods.py


Example 19: _finish_prob_for_one_fiber

  def _finish_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
    """Finish computation of prob on one element of the inverse image."""
    x = self._maybe_rotate_dims(x, rotate_right=True)
    prob = self.distribution.prob(x, **distribution_kwargs)
    if self._is_maybe_event_override:
      prob = tf.reduce_prod(prob, self._reduce_event_indices)
    return tf.exp(tf.cast(ildj, prob.dtype)) * prob
Developer: asudomoeva | Project: probability | Lines: 7 | Source: conditional_transformed_distribution.py


Example 20: __init__

    def __init__(self, n_inputs, n_rules, learning_rate=1e-2):
        self.n = n_inputs
        self.m = n_rules
        self.inputs = tf.placeholder(tf.float32, shape=(None, n_inputs))  # Input
        self.targets = tf.placeholder(tf.float32, shape=None)  # Desired output
        mu = tf.get_variable("mu", [n_rules * n_inputs],
                             initializer=tf.random_normal_initializer(0, 1))  # Means of Gaussian MFS
        sigma = tf.get_variable("sigma", [n_rules * n_inputs],
                                initializer=tf.random_normal_initializer(0, 1))  # Standard deviations of Gaussian MFS
        y = tf.get_variable("y", [1, n_rules], initializer=tf.random_normal_initializer(0, 1))  # Sequent centers

        self.params = tf.trainable_variables()

        self.rul = tf.reduce_prod(
            tf.reshape(tf.exp(-0.5 * tf.square(tf.subtract(tf.tile(self.inputs, (1, n_rules)), mu)) / tf.square(sigma)),
                       (-1, n_rules, n_inputs)), axis=2)  # Rule activations
        # Fuzzy base expansion function:
        num = tf.reduce_sum(tf.multiply(self.rul, y), axis=1)
        den = tf.clip_by_value(tf.reduce_sum(self.rul, axis=1), 1e-12, 1e12)
        self.out = tf.divide(num, den)

        self.loss = tf.losses.huber_loss(self.targets, self.out)  # Loss function computation
        # Other loss functions for regression, uncomment to try them:
        # loss = tf.sqrt(tf.losses.mean_squared_error(target, out))
        # loss = tf.losses.absolute_difference(target, out)
        self.optimize = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)  # Optimization step
        # Other optimizers, uncomment to try them:
        # self.optimize = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(self.loss)
        # self.optimize = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.loss)
        self.init_variables = tf.global_variables_initializer()  # Variable initializer
Developer: tiagoCuervo | Project: TensorANFIS | Lines: 30 | Source: anfis.py
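A hypothetical training-loop sketch for the class above; the toy data, step count, and session handling are assumptions rather than part of the original project:

import numpy as np
import tensorflow as tf

model = ANFIS(n_inputs=2, n_rules=8)
X = np.random.rand(64, 2).astype(np.float32)  # toy inputs
y = np.sin(X[:, 0]) + np.cos(X[:, 1])         # toy regression targets

with tf.Session() as sess:
    sess.run(model.init_variables)
    for step in range(200):
        _, loss = sess.run([model.optimize, model.loss],
                           feed_dict={model.inputs: X, model.targets: y})
    print("final loss:", loss)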



Note: The tensorflow.reduce_prod examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not republish without permission.

