
Python tensorflow.sqrt Function Code Examples


This article collects typical usage examples of the tensorflow.sqrt function in Python. If you have been wondering what tensorflow.sqrt does, how to use it, or what real-world calls look like, the curated code examples below should help.



The following shows 20 code examples of the sqrt function, drawn from open-source projects and sorted by popularity.
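For orientation, here is a minimal sketch of tf.sqrt itself (TF 1.x graph mode, matching the examples below; input values are made up):

import tensorflow as tf

x = tf.constant([1.0, 4.0, 9.0])
y = tf.sqrt(x)  # element-wise square root

with tf.Session() as sess:
    print(sess.run(y))  # [1. 2. 3.]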

Example 1: mean

import math
import tensorflow as tf

def mean(mean, variance, std=False):
    '''Output mean of ReLU for general Gaussian input.

    f(x) = max(x, 0).

    This function is broadcast-able, so you can provide multiple
    input means with a single variance or multiple input variances
    with a single input mean or multiple input means and variances.

    Args:
        mean: Input mean of size (Batch, Size).
        variance: Input variance vector (Batch, Size)
            or scalar v such that variance = v * ones(Size).
        std: Whether the provided `variance` is the standard deviation.

    Returns:
        Output mean of ReLU for general Gaussian input (Batch, Size).
    '''
    std = variance if std else tf.sqrt(variance)
    zero_mean = std / tf.sqrt(2.0 * math.pi)
    if mean is None:
        return zero_mean  # efficient computation when mean is zeros
    u = mean / (math.sqrt(2.0) * std)
    bias = 0.5 * mean * (1.0 + tf.erf(u))
    return zero_mean * tf.exp(-u ** 2.0) + bias
Developer: ModarTensai | Project: network_moments | Lines: 25 | Source: relu.py
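A hypothetical call to the mean function above, illustrating the broadcasting its docstring describes (inputs invented; outputs hand-computed from the closed form sigma*phi(mu/sigma) + mu*Phi(mu/sigma) with unit variance):

means = tf.constant([[0.0, 1.0, -1.0]])     # (Batch=1, Size=3)
relu_means = mean(means, tf.constant(1.0))  # one scalar variance broadcasts over all means

with tf.Session() as sess:
    print(sess.run(relu_means))  # approximately [[0.3989, 1.0833, 0.0833]]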


Example 2: bhattacharyya

    def bhattacharyya(self):
        """Approximate bhattacharyya distance between cover and non-cover distances.
        
        Similar to Mahalanobis distance, but for distributions with different variances.
        Assumes normality, hence approximate.

        Returns:
            tf.Tensor: Bhattacharyya distance between distributions of the cover
                and non-cover pairs' distances.
            tf.Tensor: mean cover pair distance
            tf.Tensor: mean non-cover pair distance
        """
        y_A, y_B = self.subnet_A[-1], self.subnet_B[-1]
        squared_dists = tf.reduce_sum(tf.square(y_A - y_B),
                                      reduction_indices=1)
        
        cover_pairs = tf.where(tf.equal(self.is_cover, tf.ones_like(self.is_cover)))
        non_cover_pairs = tf.where(tf.equal(self.is_cover, tf.zeros_like(self.is_cover)))

        pair_dists = tf.sqrt(tf.gather(squared_dists, tf.reshape(cover_pairs, [-1])))
        non_pair_dists = tf.sqrt(tf.gather(squared_dists, tf.reshape(non_cover_pairs, [-1])))
        
        mu_pairs, sigma2_pairs = tf.nn.moments(pair_dists, axes=[0], name='d_pairs')
        mu_non_pairs, sigma2_non_pairs = tf.nn.moments(non_pair_dists, axes=[0], name='d_non_pairs')

        bhatt = tf.add( 0.25 * tf.log(0.25 * (sigma2_pairs/sigma2_non_pairs + sigma2_non_pairs/sigma2_pairs + 2)),
                  0.25 * (mu_pairs - mu_non_pairs)**2 / (sigma2_pairs + sigma2_non_pairs), name='bhatt')
        return bhatt, mu_pairs, mu_non_pairs
Developer: jvbalen | Project: cover_id | Lines: 28 | Source: learn.py


Example 3: xavier_init

import tensorflow as tf

def xavier_init(n_inputs, n_outputs, uniform=True):
    """Xavier/Glorot-style initializer for a layer with the given fan-in/fan-out."""
    if uniform:
        init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range)
    else:
        stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev)
Developer: yeoshim | Project: MLWTF | Lines: 7 | Source: DNNwXavier4MNIST.py
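A usage sketch (layer sizes invented): the returned initializer plugs straight into tf.get_variable.

W = tf.get_variable("W", shape=[784, 256],
                    initializer=xavier_init(784, 256))
# uniform=True: samples from [-sqrt(6/1040), +sqrt(6/1040)], roughly [-0.076, 0.076]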


Example 4: _build_iid_normal_model

  def _build_iid_normal_model(self, num_timesteps, latent_size,
                              observation_size, transition_variance,
                              observation_variance):
    """Build a model whose outputs are IID normal by construction."""

    transition_variance = self._build_placeholder(transition_variance)
    observation_variance = self._build_placeholder(observation_variance)

    # Use orthogonal matrices to project a (potentially
    # high-dimensional) latent space of IID normal variables into a
    # low-dimensional observation that is still IID normal.
    random_orthogonal_matrix = lambda: np.linalg.qr(
        np.random.randn(latent_size, latent_size))[0][:observation_size, :]
    observation_matrix = self._build_placeholder(random_orthogonal_matrix())

    model = tfd.LinearGaussianStateSpaceModel(
        num_timesteps=num_timesteps,
        transition_matrix=self._build_placeholder(
            np.zeros([latent_size, latent_size])),
        transition_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.sqrt(transition_variance) *
            tf.ones([latent_size], dtype=self.dtype)),
        observation_matrix=observation_matrix,
        observation_noise=tfd.MultivariateNormalDiag(
            scale_diag=tf.sqrt(observation_variance) *
            tf.ones([observation_size], dtype=self.dtype)),
        initial_state_prior=tfd.MultivariateNormalDiag(
            scale_diag=tf.sqrt(transition_variance) *
            tf.ones([latent_size], dtype=self.dtype)),
        validate_args=True)

    return model
Developer: asudomoeva | Project: probability | Lines: 32 | Source: linear_gaussian_ssm_test.py


Example 5: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew. For a derivation of the terms in here, see the associated SGPR
        notebook.
        """
        num_inducing = tf.shape(self.Z)[0]
        err = self.Y - self.mean_function(self.X)
        Kuf = self.kern.K(self.Z, self.X)
        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
        Kus = self.kern.K(self.Z, Xnew)
        L = tf.cholesky(Kuu)
        A = tf.matrix_triangular_solve(L, Kuf, lower=True) * tf.sqrt(1. / self.likelihood.variance)
        B = tf.matmul(A, tf.transpose(A)) + eye(num_inducing)
        LB = tf.cholesky(B)
        c = tf.matrix_triangular_solve(LB, tf.matmul(A, err), lower=True) * tf.sqrt(1. / self.likelihood.variance)
        tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
        tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tf.transpose(tmp2), c)
        if full_cov:
            var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2) - tf.matmul(tf.transpose(tmp1), tmp1)
            # tf.pack is the old name of tf.stack in early TF versions
            var = tf.tile(tf.expand_dims(var, 2), tf.pack([1, 1, tf.shape(self.Y)[1]]))
        else:
            var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) - tf.reduce_sum(tf.square(tmp1), 0)
            var = tf.tile(tf.expand_dims(var, 1), tf.pack([1, tf.shape(self.Y)[1]]))
        return mean + self.mean_function(Xnew), var
Developer: agarbuno | Project: GPflow | Lines: 26 | Source: sgpr.py


Example 6: pearsoncorrelation

import tensorflow as tf

def pearsoncorrelation(ypred, y):
    muy_ypred = tf.reduce_mean(ypred)
    muy_y = tf.reduce_mean(y)
    numerator = tf.reduce_sum(tf.multiply(ypred - muy_ypred, y - muy_y))
    denominator = tf.multiply(tf.sqrt(tf.reduce_sum(tf.square(ypred - muy_ypred))),
                              tf.sqrt(tf.reduce_sum(tf.square(y - muy_y)))) + 1e-10
    return numerator / denominator
Developer: savourylie | Project: fucos-tensorflow | Lines: 7 | Source: metrics.py
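For example (made-up data, reusing the import above):

y_true = tf.constant([1.0, 2.0, 3.0, 4.0])
y_pred = tf.constant([1.1, 1.9, 3.2, 3.8])
r = pearsoncorrelation(y_pred, y_true)  # scalar close to 1.0 for this nearly linear pair

with tf.Session() as sess:
    print(sess.run(r))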


Example 7: prob_is_largest

    def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
        # work out what the mean and variance are of the indicated latent function.
        oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1.0, 0.0), float_type)
        mu_selected = tf.reduce_sum(oh_on * mu, 1)
        var_selected = tf.reduce_sum(oh_on * var, 1)

        # generate the Gauss-Hermite grid
        X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
            tf.sqrt(tf.clip_by_value(2.0 * var_selected, 1e-10, np.inf)), (-1, 1)
        )

        # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
        dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
            tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2
        )
        cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))

        cdfs = cdfs * (1 - 2e-4) + 1e-4

        # blank out all the distances on the selected latent function
        oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0.0, 1.0), float_type)
        cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)

        # take the product over the latent functions, and the sum over the GH grid.
        return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
Developer: GPflow | Project: GPflow | Lines: 25 | Source: likelihoods.py


Example 8: evalFunction

import tensorflow as tf

def evalFunction(classVec, attributeVec, groundTruthLabels):
    # L2-normalize each row, then take pairwise dot products (cosine similarity).
    # groundTruthLabels is unused in this excerpt.
    classVec = classVec / tf.sqrt(tf.reduce_sum(tf.square(classVec), 1, keep_dims=True))
    attributeVec = attributeVec / tf.sqrt(tf.reduce_sum(tf.square(attributeVec), 1, keep_dims=True))
    similarity = tf.matmul(classVec, attributeVec, transpose_b=True)

    return similarity
Developer: dragon9001 | Project: attributes2classname | Lines: 7 | Source: zsl.py


Example 9: get_weight_stats

def get_weight_stats(x, axis):
  """ Compute weight statistics over the given axis.

  Args:
    x: tf.Tensor
      a batch of activations.
    axis: int
      axis to perform statistics over.
  Returns:
    list of tf.Tensor
      3-D tensors holding the statistics (L1 norm, L2 norm, mean, std).
  """
  if x is None:
    return []

  stats = []
  l1 = tf.reduce_mean(tf.abs(x), axis=axis)
  l2 = tf.sqrt(tf.reduce_mean(x**2, axis=axis) + 1e-6)

  mean, var = tf.nn.moments(x, [axis])
  stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)])

  stats = [tf.reshape(s, [-1, 1, 1]) for s in stats]

  return stats
Developer: ALISCIFP | Project: models | Lines: 25 | Source: more_local_weight_update.py
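A usage sketch (shapes invented): for a weight matrix of shape [num_inputs, num_units], axis=0 yields per-unit statistics.

w = tf.random_normal([128, 64])
stats = get_weight_stats(w, axis=0)  # list of four tensors, each of shape [64, 1, 1]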


Example 10: disjunction_of_literals

def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1, list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0 - tf.reduce_prod(1.0 - literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print("data aggregator is lukas")
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)  # PR is a project-local debug helper
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print("data aggregator is mean")
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print("data aggregator is hmean")
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print("data aggregator is min")
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print("data aggregator is qmean")
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print("data aggregator is cmean")
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
Developer: ivanDonadello | Project: knowPic | Lines: 33 | Source: logictensornetworks.py


Example 11: f1

def f1():  # the TensorFlow path taken when no jump occurs
    vector = inter_vec_temp / tf.sqrt(new_norm)
    propa = prob / tf.sqrt(new_norm)
    # we already evolved by Heff, so just normalize the state and
    # move on with the same random number
    counter = tf.constant(0)
    t = self.r
    return t, counter, norm, propa, vector
Developer: MohamedAbdelhafez | Project: Grape-Packaged | Lines: 7 | Source: TensorflowState.py


Example 12: encoder

def encoder(inputs, training=True, scope="encoder", reuse=None):
    '''
    Args:
      inputs: A 2d tensor with shape of [N, Tx], with dtype of int32. Encoder inputs.
      training: Whether or not the layer is in training mode.
      scope: Optional scope for `variable_scope`
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    
    Returns:
      keys, vals: two tensors of shape (N, Tx, e), the attention keys and values
        (the so-called memory).
    '''
    with tf.variable_scope(scope, reuse=reuse):
        with tf.variable_scope("text_embedding"):
            embedding = embed(inputs, hp.vocab_size, hp.embed_size)  # (N, Tx, e)

        with tf.variable_scope("encoder_prenet"):
            tensor = fc_block(embedding, hp.enc_channels, training=training) # (N, Tx, c)

        with tf.variable_scope("encoder_conv"):
            for i in range(hp.enc_layers):
                outputs = conv_block(tensor,
                                    size=hp.enc_filter_size,
                                    rate=2**i,
                                    training=training,
                                    scope="encoder_conv_{}".format(i)) # (N, Tx, c)
                tensor = (outputs + tensor) * tf.sqrt(0.5)

        with tf.variable_scope("encoder_postnet"):
            keys = fc_block(tensor, hp.embed_size, training=training) # (N, Tx, e)
            vals = tf.sqrt(0.5) * (keys + embedding) # (N, Tx, e)

    return keys, vals
Developer: WeCognize | Project: deepvoice3 | Lines: 33 | Source: networks.py
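A note on the recurring tf.sqrt(0.5) factor above: scaling the sum of two signals by 1/sqrt(2) keeps the variance of the residual output comparable to that of each input (assuming the two are roughly uncorrelated), which is why it appears after every residual addition in this encoder.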


Example 13: _encode

  def _encode(self, boxes, anchors):
    """Encodes a box collection with respect to an anchor collection.

    Args:
      boxes: BoxList holding N boxes to be encoded.
      anchors: BoxList of anchors.

    Returns:
      a tensor representing N anchor-encoded boxes of the format
      [top, bottom, left, right].
    """
    # Convert anchors to the center coordinate representation.
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)
    ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
    l = tf.sqrt(h * w)
    # Avoid NaN in division and log below.
    la += EPSILON
    l += EPSILON

    top = tf.abs(ycenter_a - ycenter + 0.5*h)
    bottom = tf.abs(ycenter_a - ycenter - 0.5*h)
    left = tf.abs(xcenter_a - xcenter + 0.5*w)
    right = tf.abs(xcenter_a - xcenter - 0.5*w)
    # Scales location targets for joint training.
    if self._scale_factors:
      top *= self._scale_factors[0]
      bottom *= self._scale_factors[0]
      left *= self._scale_factors[1]
      right *= self._scale_factors[1]
    return tf.transpose(tf.stack([top, bottom, left, right]))
Developer: chenxiang204 | Project: code | Lines: 31 | Source: east_square_box_coder.py


Example 14: apply_gradients

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        ts = super().apply_gradients(grads_and_vars, global_step, name)

        mn, vn = self.get_slot_names()
        dynamics = []

        with tf.name_scope(name, 'Adam_Dynamics'):
            b1_pow, b2_pow = self._beta1_power, self._beta2_power
            lr_k = self._lr_t * tf.sqrt(1. - b2_pow) / (1. - b1_pow)

            for g, w in grads_and_vars:
                m = self.get_slot(w, mn)
                v = self.get_slot(w, vn)
                mk = tf.add(self._beta1_t * m, (1. - self._beta1_t) * g, name=m.op.name)
                vk = tf.add(self._beta2_t * v, (1. - self._beta2_t) * g * g, name=v.op.name)

                wk = tf.subtract(w, lr_k * mk / (tf.sqrt(vk + self._epsilon_t**2)), name=w.op.name)
                # IMPORTANT NOTE: epsilon should sit outside the sqrt, as in the original Adam
                # implementation, but doing so makes the hypergradient numerically unstable.

                dynamics.extend([(w, wk), (m, mk), (v, vk)])

            b1_powk = b1_pow * self._beta1_t
            b2_powk = b2_pow * self._beta2_t
            dynamics.extend([(b1_pow, b1_powk), (b2_pow, b2_powk)])

        return ts, dynamics
Developer: codealphago | Project: FAR-HO | Lines: 27 | Source: optimizer.py
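In equation form, the dynamics above implement w_{k+1} = w_k - lr_k * m_{k+1} / sqrt(v_{k+1} + eps^2), with lr_k = lr * sqrt(1 - beta2^k) / (1 - beta1^k); as the code comment notes, epsilon sits inside the square root here, deviating from the original Adam update for the sake of a stable hypergradient.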


Example 15: summary_gradient_updates

def summary_gradient_updates(grads, opt, lr):
    """get summary ops for the magnitude of gradient updates"""

    # strategy:
    # make a dict of variable name -> [variable, grad, adagrad slot]
    vars_grads = {}
    for v in tf.trainable_variables():
        vars_grads[v.name] = [v, None, None]
    for g, v in grads:
        vars_grads[v.name][1] = g
        vars_grads[v.name][2] = opt.get_slot(v, 'accumulator')

    # now make summaries
    ret = []
    for vname, (v, g, a) in vars_grads.items():

        if g is None:
            continue

        if isinstance(g, tf.IndexedSlices):
            # a sparse gradient - only take norm of params that are updated
            updates = lr * g.values
            if a is not None:
                updates /= tf.sqrt(tf.gather(a, g.indices))
        else:
            updates = lr * g
            if a is not None:
                updates /= tf.sqrt(a)

        values_norm = tf.sqrt(tf.reduce_sum(v * v)) + 1.0e-7
        updates_norm = tf.sqrt(tf.reduce_sum(updates * updates))
        ret.append(tf.summary.scalar('UPDATE/' + vname.replace(":", "_"), updates_norm / values_norm))

    return ret
Developer: RileyShe | Project: DeepPavlov | Lines: 34 | Source: train_utils.py


Example 16: dense

def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        V = get_var_maybe_avg('V', ema, shape=[int(x.get_shape()[1]),num_units], dtype=tf.float32,
                              initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
        g = get_var_maybe_avg('g', ema, shape=[num_units], dtype=tf.float32,
                              initializer=tf.constant_initializer(1.), trainable=True)
        b = get_var_maybe_avg('b', ema, shape=[num_units], dtype=tf.float32,
                              initializer=tf.constant_initializer(0.), trainable=True)

        # use weight normalization (Salimans & Kingma, 2016)
        x = tf.matmul(x, V)
        scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
        x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])

        if init: # normalize x
            m_init, v_init = tf.nn.moments(x, [0])
            scale_init = init_scale/tf.sqrt(v_init + 1e-10)
            with tf.control_dependencies([g.assign(g*scale_init), b.assign_add(-m_init*scale_init)]):
                x = tf.nn.l2_normalize(x, axis=0)

        # apply nonlinearity
        if nonlinearity is not None:
            x = nonlinearity(x)

        return x
Developer: BhaskarNallani | Project: gradient-checkpointing | Lines: 27 | Source: nn.py
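The scaler line above is what implements weight normalization (Salimans & Kingma, 2016): each output unit's effective weight vector is g * V[:, j] / ||V[:, j]||, decoupling the direction stored in V from the magnitude stored in g.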


Example 17: cosine_distance

import tensorflow as tf

EPSILON = 1e-8  # assumed value; EPSILON is a module-level constant in the source project

def cosine_distance(v1, v2):
    """
    Calculate the cosine distance between the representations of the
    words of the two sentences.

    Parameters
    ----------
    v1: Tensor
        Tensor of shape (batch_size, 1, num_sentence_words, context_rnn_hidden_size)
        representing the first sentence to take the cosine similarity with.

    v2: Tensor
        Tensor of shape (batch_size, num_sentence_words, 1, context_rnn_hidden_size)
        representing the second sentence to take the cosine similarity with.
    """
    # The product of the two vectors is shape
    # (batch_size, num_sentence_words, num_sentence_words, rnn_hidden_size)
    # Taking the sum over the last axis results in shape:
    # (batch_size, num_sentence_words, num_sentence_words)
    cosine_numerator = tf.reduce_sum(tf.multiply(v1, v2), axis=-1)
    # Shape: (batch_size, 1, num_sentence_words)
    v1_norm = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(v1), axis=-1),
                                 EPSILON))
    # Shape: (batch_size, num_sentence_words, 1)
    v2_norm = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(v2), axis=-1),
                                 EPSILON))
    # Shape: (batch_size, num_sentence_words, num_sentence_words)
    return cosine_numerator / v1_norm / v2_norm
Developer: DanielSnipes | Project: paraphrase-id-tensorflow | Lines: 28 | Source: matching.py
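A shape-oriented sketch of calling cosine_distance (dimensions invented): v1 carries a broadcast axis at position 1 and v2 at position 2, so the result holds every pairwise similarity.

v1 = tf.random_normal([2, 1, 5, 8])   # (batch, 1, num_sentence_words, hidden)
v2 = tf.random_normal([2, 5, 1, 8])   # (batch, num_sentence_words, 1, hidden)
sims = cosine_distance(v1, v2)        # shape (2, 5, 5): pairwise cosine similarities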


Example 18: _encode

    def _encode(self, boxes, anchors):
        """Encodes a box collection with respect to an anchor collection.

        Args:
          boxes: BoxList holding N boxes to be encoded.
          anchors: BoxList of anchors.

        Returns:
          a tensor representing N anchor-encoded boxes of the format
          [ty, tx, tl].
        """
        # Convert anchors to the center coordinate representation.
        ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
        la = tf.sqrt(ha * wa)
        ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
        l = tf.sqrt(h * w)
        # Avoid NaN in division and log below.
        la += EPSILON
        l += EPSILON

        tx = (xcenter - xcenter_a) / la
        ty = (ycenter - ycenter_a) / la
        tl = tf.log(l / la)
        # Scales location targets for joint training.
        if self._scale_factors:
            ty *= self._scale_factors[0]
            tx *= self._scale_factors[1]
            tl *= self._scale_factors[2]
        return tf.transpose(tf.stack([ty, tx, tl]))
Developer: Zumbalamambo | Project: deepcv | Lines: 29 | Source: square_box_coder.py
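As a hand-worked illustration of this encoding (numbers invented, scale factors disabled): an anchor with center (ycenter_a, xcenter_a) = (10, 10) and size ha = wa = 4 has la = sqrt(4 * 4) = 4; a target box with center (11, 12), h = 8 and w = 2 has l = sqrt(8 * 2) = 4, so ty = (11 - 10) / 4 = 0.25, tx = (12 - 10) / 4 = 0.5, and tl = log(4 / 4) = 0.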


Example 19: p_zt

    def p_zt(self, prev_state, t):
        """Computes the model p(z_t | z_{t-1})."""
        batch_size = tf.shape(prev_state)[0]
        if t > 0:
            z_mu_p = prev_state + self.bs[t - 1]
            p_zt = tf.contrib.distributions.Normal(
                loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance))
            return p_zt
        else:  # p(z_0) is a mixture of two Normals
            mu_pos = tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean
            mu_neg = tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean
            z0_pos = tf.contrib.distributions.Normal(
                loc=mu_pos,
                scale=tf.sqrt(tf.ones_like(mu_pos) * self.variance))
            z0_neg = tf.contrib.distributions.Normal(
                loc=mu_neg,
                scale=tf.sqrt(tf.ones_like(mu_neg) * self.variance))
            mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1 - self.mixing_coeff], dtype=tf.float64)
            mode_probs = tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1])
            mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs)
            z0_dist = tf.contrib.distributions.Mixture(
                cat=mode_selection_dist,
                components=[z0_pos, z0_neg],
                validate_args=False)
            return z0_dist
Developer: 812864539 | Project: models | Lines: 25 | Source: models.py


Example 20: sample_weights

    def sample_weights(self, weights):

        log_p = 0
        log_q = 0

        sampled_weights = []
        for layer_i in range(len(self.network_architecture['decoder_net'])):

            if layer_i == 0:
                eps = tf.random_normal((self.n_z+1, self.network_architecture['decoder_net'][layer_i]), 0, 1, dtype=tf.float32)
                weights_ = tf.add(weights['l'+str(layer_i)+'mean'], tf.multiply(tf.sqrt(tf.exp(weights['l'+str(layer_i)+'logvar'])), eps))
                n_decoder_weights = (self.n_z+1) * self.network_architecture['decoder_net'][layer_i]
                log_p += self.log_p_theta(weights_, n_decoder_weights)
                log_q += self.log_q_theta(weights_, weights['l'+str(layer_i)+'mean'], weights['l'+str(layer_i)+'logvar'], n_decoder_weights)
            else:
                eps = tf.random_normal((self.network_architecture['decoder_net'][layer_i-1]+1, self.network_architecture['decoder_net'][layer_i]), 0, 1, dtype=tf.float32)
                weights_ = tf.add(weights['l'+str(layer_i)+'mean'], tf.multiply(tf.sqrt(tf.exp(weights['l'+str(layer_i)+'logvar'])), eps))
                n_decoder_weights = (self.network_architecture['decoder_net'][layer_i-1]+1) * self.network_architecture['decoder_net'][layer_i]
                log_p += self.log_p_theta(weights_, n_decoder_weights)
                log_q += self.log_q_theta(weights_, weights['l'+str(layer_i)+'mean'], weights['l'+str(layer_i)+'logvar'], n_decoder_weights)

            sampled_weights.append(weights_)

        eps = tf.random_normal((self.network_architecture['decoder_net'][-1]+1, self.n_input), 0, 1, dtype=tf.float32)
        weights_ = tf.add(weights['out_mean_mean'], tf.multiply(tf.sqrt(tf.exp(weights['out_mean_logvar'])), eps))
        sampled_weights.append(weights_)
        n_decoder_weights = (self.network_architecture['decoder_net'][-1]+1) * self.n_input
        log_p += self.log_p_theta(weights_, n_decoder_weights)
        log_q += self.log_q_theta(weights_, weights['out_mean_mean'], weights['out_mean_logvar'], n_decoder_weights)


        return sampled_weights, log_p, log_q
Developer: chriscremer | Project: Other_Code | Lines: 35 | Source: BVAE.py



Note: The tensorflow.sqrt examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are excerpted from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this compilation without permission.

