
Python tensorflow.exp Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.exp function in Python. If you have been wondering exactly how to use tf.exp, what it is for, or what real calls to it look like, the curated examples below should help.



The following presents 20 code examples of the exp function, ordered by popularity by default.
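Before diving into the examples, here is a minimal sketch of the function itself (my own illustration, not taken from any of the projects below). tf.exp computes the element-wise exponential e**x of a tensor; the snippet assumes the TensorFlow 1.x graph/session style used throughout this article:

import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
y = tf.exp(x)  # element-wise e**x

with tf.Session() as sess:
    print(sess.run(y))  # -> approximately [1.0, 2.7182817, 7.389056]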

Example 1: _testSampleLogProbExact

  def _testSampleLogProbExact(
      self, concentrations, det_bounds, dim, means,
      num_samples=int(1e5), dtype=np.float32, target_discrepancy=0.1, seed=42):
    # For test methodology see the comment in
    # _testSampleConsistentLogProbInterval, except that this test
    # checks those parameter settings where the true volume is known
    # analytically.
    concentration = np.array(concentrations, dtype=dtype)
    det_bounds = np.array(det_bounds, dtype=dtype)
    means = np.array(means, dtype=dtype)
    # Add a tolerance to guard against some of the importance_weights exceeding
    # the theoretical maximum (importance_maxima) due to numerical inaccuracies
    # while lower bounding the determinant. See corresponding comment in
    # _testSampleConsistentLogProbInterval.
    high_tolerance = 1e-6

    testee_lkj = tfd.LKJ(
        dimension=dim, concentration=concentration, validate_args=True)
    x = testee_lkj.sample(num_samples, seed=seed)
    importance_weights = (
        tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds))
    importance_maxima = (1. / det_bounds) ** (concentration - 1) * tf.exp(
        testee_lkj._log_normalization())

    chk1 = st.assert_true_mean_equal_by_dkwm(
        importance_weights, low=0., high=importance_maxima + high_tolerance,
        expected=means, false_fail_rate=1e-6)
    chk2 = tf.assert_less(
        st.min_discrepancy_of_true_means_detectable_by_dkwm(
            num_samples, low=0., high=importance_maxima + high_tolerance,
            false_fail_rate=1e-6, false_pass_rate=1e-6),
        dtype(target_discrepancy))
    self.evaluate([chk1, chk2])
Author: asudomoeva | Project: probability | Lines: 33 | Source: lkj_test.py


Example 2: build_psi_stats_rbf

def build_psi_stats_rbf(Z, kern, mu, S):

    # use only active dimensions
    mu, S = kern._slice(mu, S)  # only use the active dimensions.
    Z, _ = kern._slice(Z, None)

    # psi0
    N = tf.shape(mu)[0]
    psi0 = tf.cast(N, tf.float64) * kern.variance

    # psi1
    lengthscale2 = tf.square(kern.lengthscales)
    psi1_logdenom = tf.expand_dims(tf.reduce_sum(tf.log(S / lengthscale2 + 1.), 1), 1)  # N x 1
    d = tf.square(tf.expand_dims(mu, 1)-tf.expand_dims(Z, 0))  # N x M x Q
    psi1_log = - 0.5 * (psi1_logdenom + tf.reduce_sum(d/tf.expand_dims(S+lengthscale2, 1), 2))
    psi1 = kern.variance * tf.exp(psi1_log)

    # psi2
    psi2_logdenom = -0.5 * tf.expand_dims(tf.reduce_sum(tf.log(2.*S/lengthscale2 + 1.), 1), 1)  # N x 1
    psi2_logdenom = tf.expand_dims(psi2_logdenom, 1)
    psi2_exp1 = 0.25 * tf.reduce_sum(tf.square(tf.expand_dims(Z, 1)-tf.expand_dims(Z, 0))/lengthscale2, 2)  # M x M
    psi2_exp1 = tf.expand_dims(psi2_exp1, 0)

    Z_hat = 0.5 * (tf.expand_dims(Z, 1) + tf.expand_dims(Z, 0))  # MxMxQ
    denom = 1./(2.*S+lengthscale2)
    a = tf.expand_dims(tf.expand_dims(tf.reduce_sum(tf.square(mu)*denom, 1), 1), 1)  # N x 1 x 1
    b = tf.reduce_sum(tf.expand_dims(tf.expand_dims(denom, 1), 1) * tf.square(Z_hat), 3)  # N M M
    c = -2*tf.reduce_sum(tf.expand_dims(tf.expand_dims(mu*denom, 1), 1) * Z_hat, 3)  # N M M
    psi2_exp2 = a + b + c

    psi2 = tf.square(kern.variance) * tf.reduce_sum(tf.exp(psi2_logdenom - psi2_exp1 - psi2_exp2), 0)
    return psi0, psi1, psi2
Author: blutooth | Project: dgp | Lines: 32 | Source: kernel_expectations.py
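A note on the design: both psi1 and psi2 are assembled in the log domain (the psi1_log and psi2_logdenom terms) and passed through tf.exp only once at the end, which avoids the underflow and overflow that multiplying many small Gaussian factors directly would cause.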


Example 3: transform_box

def transform_box(bbox, height, width):
    """ Transform the bounding box format 
        Args:
            bbox: [N X 4] input N bbox
                  format = [cx, cy, log(w/W), log(h/H)]
            height: height of original image
            width: width of original image

        Return:
            bbox: [N X 4] output rounded N bbox
                  format = [left top right bottom]
    """
    x, y, w, h = tf.split(1, 4, bbox)

    h = tf.exp(h) * height
    w = tf.exp(w) * width
    x = (x + 1) * width / 2
    y = (y + 1) * height / 2

    x1 = x - w / 2
    y1 = y - h / 2
    x2 = x + w / 2
    y2 = y + h / 2

    bbox_out = tf.concat(1, [x1, y1, x2, y2])

    return bbox_out
Author: renmengye | Project: deep-tracker | Lines: 27 | Source: build_deep_tracker.py
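Note that this snippet targets the pre-1.0 TensorFlow API, where tf.split(1, 4, bbox) and tf.concat(1, [x1, y1, x2, y2]) take the axis as the first argument; in TensorFlow >= 1.0 the equivalent calls are tf.split(bbox, 4, axis=1) and tf.concat([x1, y1, x2, y2], axis=1).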


Example 4: copy_net_logit_function

                def copy_net_logit_function(state):
                    state = tf.nn.dropout(state, self.dropout_placeholder)

                    # the logits for generating the next word are computed in
                    # the standard way
                    generate_logits = tf.matmul(state, decoding_w) + decoding_b

                    # Equation 8 in the paper ... in shape of source sentence
                    # (batch x time)
                    copy_logits_in_time = tf.reduce_sum(
                        projected_inputs * tf.expand_dims(state, 1), [2])

                    # mask out the padding in exponential domain
                    copy_logits_in_time_exp_masked = tf.exp(
                        tf.minimum([[80.0]], copy_logits_in_time)) * copy_mask

                    #  ... in shape of vocabulary (batch x time x vocabulary)
                    copy_logits_in_vocabulary = tf.expand_dims(
                        copy_logits_in_time_exp_masked,
                        2) * vocabulary_shaped_indices

                    # Equation 6 without normalization
                    copy_logits_exp = tf.reduce_sum(copy_logits_in_vocabulary,
                                                    [1])

                    logits_exp = copy_logits_exp \
                                 + tf.exp(tf.minimum([[80.0]], generate_logits))

                    return (tf.log(tf.maximum([[1e-40]], logits_exp)),
                            copy_logits_in_time)
Author: alvaz16 | Project: neuralmonkey | Lines: 30 | Source: decoder.py


Example 5: get_mixture_coef

    def get_mixture_coef(output):
      # returns the tf slices containing mdn dist params
      # ie, eq 18 -> 23 of http://arxiv.org/abs/1308.0850
      z = output
      z_eos = z[:, 0:1]
      z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr = tf.split(1, 6, z[:, 1:])

      # process output z's into MDN parameters

      # end of stroke signal
      z_eos = tf.sigmoid(z_eos) # should be negated, but doesn't matter.

      # softmax all the pi's:
      max_pi = tf.reduce_max(z_pi, 1, keep_dims=True)
      z_pi = tf.sub(z_pi, max_pi)
      z_pi = tf.exp(z_pi)
      normalize_pi = tf.inv(tf.reduce_sum(z_pi, 1, keep_dims=True))
      z_pi = tf.mul(normalize_pi, z_pi)

      # exponentiate the sigmas and also make corr between -1 and 1.
      z_sigma1 = tf.exp(z_sigma1)
      z_sigma2 = tf.exp(z_sigma2)
      z_corr = tf.tanh(z_corr)

      return [z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr, z_eos]
Author: DanialBahrami | Project: write-rnn-tensorflow | Lines: 25 | Source: model.py
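The tf.sub, tf.inv, and tf.mul ops here are also pre-1.0 names; TensorFlow 1.0 renamed them tf.subtract, tf.reciprocal, and tf.multiply. The max-subtraction before tf.exp is the standard numerically stable softmax over the mixture weights z_pi.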


Example 6: softmax

def softmax(x):
  """
  Compute the softmax function in tensorflow.

  You might find the tensorflow functions tf.exp, tf.reduce_max,
  tf.reduce_sum, tf.expand_dims useful. (Many solutions are possible, so you may
  not need to use all of these functions). Recall also that many common
  tensorflow operations are sugared (e.g. x * y does a tensor multiplication
  if x and y are both tensors). Make sure to implement the numerical stability
  fixes as in the previous homework!

  Args:
    x:   tf.Tensor with shape (n_samples, n_features). Note feature vectors are
         represented by row-vectors. (For simplicity, no need to handle 1-d
         input as in the previous homework)
  Returns:
    out: tf.Tensor with shape (n_samples, n_features). You need to construct this
         tensor in this problem.
  """

  ### YOUR CODE HERE
  x -= tf.reduce_max(x, reduction_indices=1, keep_dims=True)
  out = tf.exp(x) / tf.reduce_sum(tf.exp(x), reduction_indices=1, keep_dims=True)
  ### END YOUR CODE
  
  return out 
Author: lbbc1117 | Project: CS224d-2016 | Lines: 26 | Source: q1_softmax.py
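As a quick sanity check of the stability fix (my own usage sketch, assuming TF 1.x and the softmax defined above in scope): shifting a row of logits by a large constant must not change its softmax, because the subtracted row maximum cancels the shift, whereas a naive exp would overflow.

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0],
                      [1001.0, 1002.0, 1003.0]])  # second row = first row + 1000
with tf.Session() as sess:
    probs = sess.run(softmax(logits))
print(probs)         # both rows are ~[0.090, 0.245, 0.665]
print(probs.sum(1))  # each row sums to 1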


Example 7: filterbank_matrices

def filterbank_matrices(g_x, g_y, delta, sigma, N, A, B):
    ''' Compute filter bank matrices. All inputs are in batches.

    Args:
        g_x, g_y: grid centers, relative to the center of the image
        delta: strides
        sigma: isotropic variance
        N: grid dimension
        A, B: input image dimensions, width and height
    Returns:
        F_x, F_y: filter bank matrices [batch, N, A] and [batch, N, B]
    '''

    rng = tf.reshape(tf.cast(tf.range(N), tf.float32), [1, -1])

    # eq 19
    mu_x = g_x + (rng - N / 2 - 0.5) * delta

    # eq 20
    mu_y = g_y + (rng - N / 2 - 0.5) * delta

    a = tf.reshape(tf.cast(tf.range(A), tf.float32), [1, 1, -1])
    b = tf.reshape(tf.cast(tf.range(B), tf.float32), [1, 1, -1])

    # reshape for broadcasting
    mu_x = tf.reshape(mu_x, [-1, N, 1])
    mu_y = tf.reshape(mu_y, [-1, N, 1])
    sigma = tf.reshape(sigma, [-1, 1, 1])

    F_x = tf.exp(-tf.square((a - mu_x) / sigma))
    F_y = tf.exp(-tf.square((b - mu_y) / sigma))

    # transform in a convenient form for further use
    return F_x, F_y
Author: 255BITS | Project: TensorFlow-VAE-GAN-DRAW | Lines: 34 | Source: main-draw.py
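Each row of F_x is an unnormalized Gaussian bump exp(-((a - mu_x)/sigma)**2) evaluated at the A column positions, centered on one grid point with a shared isotropic width; F_y does the same over the B rows. In the DRAW paper these filters are further normalized so that each row sums to one, which this helper appears to leave to the caller.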


Example 8: _expectation

def _expectation(p, kern, feat, none1, none2, nghp=None):
    """
    Compute the expectation:
    <K_{X, Z}>_p(X)
        - K_{.,.} :: RBF kernel

    :return: NxM
    """
    with params_as_tensors_for(kern), params_as_tensors_for(feat):
        # use only active dimensions
        Xcov = kern._slice_cov(p.cov)
        Z, Xmu = kern._slice(feat.Z, p.mu)
        D = tf.shape(Xmu)[1]
        if kern.ARD:
            lengthscales = kern.lengthscales
        else:
            lengthscales = tf.zeros((D,), dtype=settings.tf_float) + kern.lengthscales

        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov)  # NxDxD

        all_diffs = tf.transpose(Z) - tf.expand_dims(Xmu, 2)  # NxDxM
        exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True)  # NxDxM
        exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM

        sqrt_det_L = tf.reduce_prod(lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov  # N

        return kern.variance * (determinants[:, None] * exponent_mahalanobis)
Author: vincentadam87 | Project: GPflow | Lines: 30 | Source: expectations.py


Example 9: __init__

    def __init__(self, encoder, decoder):
        self.x = tf.placeholder(tf.float32, name='input')
        self.latent_shape = (encoder.output_shape[0], encoder.output_shape[1] // 2)
        self.encoder = encoder
        self.decoder = decoder
        self.batch_size = self.latent_shape[0]

        assert None not in self.latent_shape, "All dimensions must be known"
        encoded = tf.reshape(encoder(self.x), (self.batch_size, 2, self.latent_shape[1]))
        self.mu, self.log_sigma = encoded[:, 0, :], encoded[:, 1, :]
        self.mu = tf.reshape(self.mu, self.latent_shape)
        self.log_sigma = tf.reshape(self.log_sigma, self.latent_shape)

        self.eps = tf.random_normal(self.latent_shape,
                                    mean=0.0, stddev=1.0, name="eps")
        self.z = self.mu + tf.exp(self.log_sigma) * self.eps

        decoded = decoder(self.z)
        decoder_shape = decoder.output_shape
        if len(decoder_shape) == 2:
            decoded = tf.reshape(decoded, (self.batch_size, decoder_shape[1] // 2, 1, 2))
        else:
            assert decoder_shape[-1] == 2

        self.x_hat_mu, self.x_hat_log_sigma = decoded[:, :, :, 0], decoded[:, :, :, 1]
        self.x_hat_mu = tf.reshape(self.x_hat_mu, (self.batch_size, decoder_shape[1] // 2))
        self.x_hat_log_sigma = tf.reshape(self.x_hat_log_sigma, (self.batch_size, decoder_shape[1] // 2))

        self.params = encoder.trainable_weights + decoder.trainable_weights

        self.latent_loss = -0.5 * tf.reduce_mean(1 + self.log_sigma - self.mu**2 - tf.exp(self.log_sigma))
        self.reconstruction_loss = -tf.reduce_mean(((self.x_hat_mu - self.x)**2) / (2 * tf.exp(self.x_hat_log_sigma)))

        self.loss = self.latent_loss + self.reconstruction_loss
Author: berleon | Project: tensorflow_vae | Lines: 34 | Source: variation.py
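For reference, the closed-form KL term for a diagonal Gaussian posterior against a standard normal prior is -0.5 * sum(1 + log(sigma**2) - mu**2 - sigma**2). The latent_loss line matches this if log_sigma is read as log(sigma**2), while the sampling line z = mu + exp(log_sigma) * eps reads it as log(sigma); both conventions appear in VAE code, but a single implementation should use one consistently.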


Example 10: contrastive_loss_andre

def contrastive_loss_andre(left_feature, right_feature, label, margin):
  """
  Compute the contrastive loss as in
  https://gitlab.idiap.ch/biometric/xfacereclib.cnn/blob/master/xfacereclib/cnn/scripts/experiment.py#L156
  With Y = [-1 +1] --> [POSITIVE_PAIR NEGATIVE_PAIR]
  L = log( m + exp( Y * d^2)) / N
  **Parameters**
   left_feature: First element of the pair
   right_feature: Second element of the pair
   label: Label of the pair (0 or 1)
   margin: Contrastive margin
  **Returns**
   Return the loss operation
  """

  with tf.name_scope("contrastive_loss_andre"):
    label = tf.to_float(label)
    d = compute_euclidean_distance(left_feature, right_feature)

    loss = tf.log(tf.exp(tf.mul(label, d)))
    loss = tf.reduce_mean(loss)

    # Within class part
    genuine_factor = tf.mul(label - 1, 0.5)
    within_class = tf.reduce_mean(tf.log(tf.exp(tf.mul(genuine_factor, d))))

    # Between class part
    impostor_factor = tf.mul(label + 1, 0.5)
    between_class = tf.reduce_mean(tf.log(tf.exp(tf.mul(impostor_factor, d))))

    # first_part = tf.mul(one - label, tf.square(d))  # (Y-1)*(d^2)
    return loss, between_class, within_class
Author: imito | Project: odin | Lines: 32 | Source: losses.py
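Since log(exp(v)) = v elementwise, tf.log(tf.exp(tf.mul(label, d))) simplifies to label * d, so the first loss term reduces to the mean of label * d; the log/exp pairing presumably mirrors the docstring's L = log(m + exp(Y * d^2)) with the margin term m dropped.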


Example 11: log_normal

    def log_normal(self, position, mean, log_var, type_=1):
        '''
        Log of normal distribution

        type 1:
        position is [P, D]
        mean is [D]
        log_var is [D]
        output is [P]

        type 2:
        position is [P, D]
        mean is [P,D]
        log_var is [P,D]
        output is [P]
        '''

        n_D = tf.to_float(tf.shape(position)[1])
        term1 = n_D * tf.log(2*math.pi)

        if type_==1:
            term2 = tf.reduce_sum(log_var, 0) #sum over D [1]
            dif_cov = tf.square(position - mean) / tf.exp(log_var)
            term3 = tf.reduce_sum(dif_cov, 1) #sum over D [P]
            all_ = term1 + term2 + term3
            log_normal_ = -.5 * all_

        elif type_==2:
            term2 = tf.reduce_sum(log_var, 1) #sum over D [P]
            dif_cov = tf.square(position - mean) / tf.exp(log_var)
            term3 = tf.reduce_sum(dif_cov, 1) #sum over D [P]
            all_ = term1 + term2 + term3
            log_normal_ = -.5 * all_

        return log_normal_
Author: chriscremer | Project: Other_Code | Lines: 35 | Source: 1d_bnn.py


Example 12: __call__

  def __call__(self, inputs, state, scope=None):
    with _checked_scope(self, scope or "rwa_cell", reuse=self._reuse):
      h, n, d, a_max = state

      with vs.variable_scope("u"):
        u = _linear(inputs, self._num_units, True)

      with vs.variable_scope("g"):
        g = _linear([inputs, h], self._num_units, True)

      with vs.variable_scope("a"):
        a = _linear([inputs, h], self._num_units, False) # The bias term when factored out of the numerator and denominator cancels and is unnecessary

      z = tf.multiply(u, tanh(g))

      a_newmax = tf.maximum(a_max, a)
      exp_diff = tf.exp(a_max - a_newmax)
      exp_scaled = tf.exp(a - a_newmax)

      n = tf.multiply(n, exp_diff) + tf.multiply(z, exp_scaled)  # Numerically stable update of numerator
      d = tf.multiply(d, exp_diff) + exp_scaled  # Numerically stable update of denominator
      h_new = self._activation(tf.div(n, d))

      new_state = RWACellTuple(h_new, n, d, a_newmax)

    return h_new, new_state
Author: indiejoseph | Project: chinese-char-rnn | Lines: 26 | Source: rwa_cell.py


Example 13: kl_divergence

 def kl_divergence(self, other):
     assert isinstance(other, Gaussian)
     l2_dist = tf.square(self.mean - other.mean)
     std_dev1 = tf.exp(x=self.log_std_dev)
     sqr_std_dev2 = tf.square(x=tf.exp(x=other.log_std_dev))
     kl_div = tf.reduce_mean(self.log_std_dev - other.log_std_dev + (std_dev1 + l2_dist) / (2 * sqr_std_dev2 + util.epsilon) - 0.5, axis=0)
     return kl_div
Author: et0803 | Project: tensorforce | Lines: 7 | Source: gaussian.py
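For reference, the textbook KL divergence between univariate Gaussians is KL(N(mu1, sigma1^2) || N(mu2, sigma2^2)) = log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2) / (2 * sigma2^2) - 1/2; note that the snippet's log-ratio sign and its use of std_dev1 rather than its square differ from this form, so compare carefully before reusing it.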


Example 14: _decode

    def _decode(self, rel_codes, anchors):
        """Decode relative codes to boxes.

        Args:
          rel_codes: a tensor representing N anchor-encoded boxes.
          anchors: BoxList of anchors.

        Returns:
          boxes: BoxList holding N bounding boxes.
        """
        ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

        ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
        if self._scale_factors:
            ty /= self._scale_factors[0]
            tx /= self._scale_factors[1]
            th /= self._scale_factors[2]
            tw /= self._scale_factors[3]
        w = tf.exp(tw) * wa
        h = tf.exp(th) * ha
        ycenter = ty * ha + ycenter_a
        xcenter = tx * wa + xcenter_a
        ymin = ycenter - h / 2.
        xmin = xcenter - w / 2.
        ymax = ycenter + h / 2.
        xmax = xcenter + w / 2.
        return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
Author: Zumbalamambo | Project: deepcv | Lines: 27 | Source: faster_rcnn_box_coder.py
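This decode step inverts the standard Faster R-CNN box encoding: the encoder produces th = log(h/ha) and tw = log(w/wa) (optionally scaled by scale_factors), so tf.exp recovers the absolute height and width, while the center offsets ty and tx are de-normalized by the anchor sizes and shifted back to absolute coordinates.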


Example 15: rect_gaussian_kld

def rect_gaussian_kld(mean, log_var, mean0=0., log_var0=0., reduce_mean=True):
    def phi(x):
        return tf.exp(-0.5*tf.square(x))/np.sqrt(2*np.pi)
    def Phi(x):
        return 0.5 + 0.5*tf.erf(x/np.sqrt(2))

    smean = tf.square(mean)
    var = tf.exp(log_var)
    log_std = 0.5*log_var
    std = tf.exp(log_std)

    smean0 = tf.square(mean0)
    var0 = tf.exp(log_var0)
    log_std0 = 0.5*log_var0
    std0 = tf.exp(log_std0)

    tol = 1.0e-10
    pzero = Phi(-mean/std)
    kld = pzero*(tf.log(pzero+tol) - tf.log(Phi(-mean0/std0)+tol))
    kld += (1-pzero)*(log_std0 - log_std + 0.5*(smean0/var0 - smean/var))
    kld += (0.5/var0 - 0.5/var)*((smean + var)*(1-pzero) + mean*std*phi(-mean/std))
    kld -= (mean0/var0 - mean/var)*(mean*(1-pzero) + std*phi(-mean/std))
    kld = tf.reduce_sum(kld, 1)
    if reduce_mean:
        kld = tf.reduce_mean(kld)
    return kld
Author: juho-lee | Project: tf_practice | Lines: 26 | Source: prob.py


Example 16: tf_ssd_bboxes_decode_layer

def tf_ssd_bboxes_decode_layer(feat_localizations,
                               anchors_layer,
                               prior_scaling=[0.1, 0.1, 0.2, 0.2]):
    """Compute the relative bounding boxes from the layer features and
    reference anchor bounding boxes.

    Arguments:
      feat_localizations: Tensor containing localization features.
      anchors_layer: List of numpy arrays containing anchor boxes.

    Return:
      Tensor Nx4: ymin, xmin, ymax, xmax
    """
    yref, xref, href, wref = anchors_layer

    # Compute center, height and width
    cx = feat_localizations[:, :, :, :, 0] * wref * prior_scaling[0] + xref
    cy = feat_localizations[:, :, :, :, 1] * href * prior_scaling[1] + yref
    w = wref * tf.exp(feat_localizations[:, :, :, :, 2] * prior_scaling[2])
    h = href * tf.exp(feat_localizations[:, :, :, :, 3] * prior_scaling[3])
    # Boxes coordinates.
    ymin = cy - h / 2.
    xmin = cx - w / 2.
    ymax = cy + h / 2.
    xmax = cx + w / 2.
    bboxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)
    return bboxes
Author: bowrian | Project: SDC-Vehicle-Detection | Lines: 27 | Source: ssd_common.py


Example 17: build_encoder

  def build_encoder(self):
    """Inference Network. q(h|X)"""
    with tf.variable_scope("encoder") as scope_encoder:
      self.l1_w = tf.get_variable(
                    "l1_w",
                    shape=[self.reader.vocab_size,self.embed_dim],
                    initializer=tf.contrib.layers.xavier_initializer())
      self.l2_w = tf.get_variable(
                    "l2_w",
                    shape=[self.embed_dim,self.embed_dim],
                    initializer=tf.contrib.layers.xavier_initializer())
      
      self.mean_w = tf.get_variable(
                    "mean_w",
                    shape=[self.embed_dim,self.h_dim],
                    initializer=tf.contrib.layers.xavier_initializer())
      self.sigma_w = tf.get_variable(
                    "sigma_w",
                    shape=[self.embed_dim,self.h_dim],
                    initializer=tf.contrib.layers.xavier_initializer())
      
      self.l1 = tf.nn.relu(tf.matmul(tf.expand_dims(self.x,0),self.l1_w))
      self.l2 = tf.nn.relu(tf.matmul(self.l1,self.l2_w)) 


      self.mean = tf.matmul(self.l2,self.mean_w)
      self.log_sigma = tf.matmul(self.l2,self.sigma_w)
      self.sigma = tf.exp(self.log_sigma)
      
      self.kl = -0.5 * tf.reduce_sum(1 + 2*self.log_sigma - tf.square(self.mean) - tf.exp(2*self.log_sigma))
Author: wujsAct | Project: TeachingMachineReadAndComprehend | Lines: 30 | Source: nvdm.py


Example 18: _forward

 def _forward(self, x):
   x = self._maybe_assert_valid_x(x)
   if self.power == 0.:
     return tf.exp(x)
   # If large x accuracy is an issue, consider using:
   # (1. + x * self.power)**(1. / self.power) when x >> 1.
   return tf.exp(tf.log1p(x * self.power) / self.power)
Author: asudomoeva | Project: probability | Lines: 7 | Source: power_transform.py
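The power == 0. branch is the limiting case of this transform: (1 + p*x)**(1/p) = exp(log1p(p*x) / p) tends to exp(x) as p -> 0, so the bijector degenerates smoothly to tf.exp. Writing the general case with tf.log1p rather than tf.log(1. + x * self.power) keeps the forward pass accurate when x * self.power is small.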


Example 19: log_prob

    def log_prob(self, xs, zs):
        """Returns a vector [log p(xs, zs[1,:]), ..., log p(xs, zs[S,:])]."""
        if self.prior == 'Lognormal':
            zs = tf.exp(zs)
        elif self.prior != 'Gaussian':
            raise NotImplementedError("prior not available.")

        log_prior = -self.prior_variance * tf.reduce_sum(zs*zs)

        s = tf.reshape(zs[:,:self.n_rows*self.K], [self.n_rows,self.K])
        t = tf.reshape(zs[:,self.n_cols*self.K:], [self.n_cols,self.K])

        xp = tf.matmul(s, t, transpose_b=True)
        if self.interaction == 'multiplicative':
            xp = tf.exp(xp)
        elif self.interaction != 'additive':
            raise NotImplementedError("interaction type unknown.")

        if self.like == 'Gaussian':
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
        elif self.like == 'Poisson':
            if not (self.interaction == "additive" or self.prior == "Lognormal"):
                raise NotImplementedError("Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior
Author: jf0510310315 | Project: edward | Lines: 29 | Source: matrix_factorization.py


Example 20: encode

def encode(x, h, generator, **kwargs):
    x_transformed = kwargs["x_transformed"]
    z_dim = kwargs["z_dim"]
    phi_interm = kwargs["phi_interm"]
    prior_interm = kwargs["prior_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)
    batch_size = x.get_shape().as_list()[0]
    
    x_t = fun(x, nout = x_transformed, act = tf.nn.relu, name = "x_transformed", weight_factor = weight_factor, layers_num = layers_num)

    prior = fun(h, nout = prior_interm, act = tf.nn.relu, name = "prior", weight_factor = weight_factor, layers_num = layers_num)
    prior_mu = fun(prior, nout = z_dim, act = tf.identity, name = "prior_mu", weight_factor = weight_factor)
    prior_sigma = fun(prior, nout = z_dim, act = tf.nn.softplus, name = "prior_sigma", weight_factor = weight_factor)

    phi = fun(x_t, h, nout = phi_interm, act = tf.nn.relu, name = "phi", weight_factor = weight_factor, layers_num = layers_num)
    z_mu = fun(phi, nout = z_dim, act = tf.identity, name = "z_mu", weight_factor = weight_factor)
    z_sigma = fun(phi, nout = z_dim, act = tf.nn.softplus, name = "z_sigma", weight_factor = weight_factor)

    epsilon = tf.random_normal((batch_size, z_dim), name='epsilon')

    z = tf.cond(
        generator, 
        lambda: prior_mu + tf.exp(prior_sigma) * epsilon, 
        lambda: z_mu + tf.exp(z_sigma) * epsilon
    )

    return z, z_mu, z_sigma, prior_mu, prior_sigma, x_t
Author: alexeyche | Project: alexeyche-junk | Lines: 28 | Source: vae_model.py



Note: The tensorflow.exp examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow each project's license. Do not reproduce without permission.

