
Python tensorflow.matrix_transpose Function Code Examples


This article collects typical code examples of the Python function tensorflow.matrix_transpose. If you are wondering what matrix_transpose does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the matrix_transpose function, ordered by popularity by default.
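Before diving into the collected examples, a minimal sketch of the op itself (assuming a TensorFlow 1.x environment, where tf.matrix_transpose exists; in TensorFlow 2.x it lives at tf.linalg.matrix_transpose):

import tensorflow as tf

# Rank-2 input: an ordinary matrix transpose.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])             # shape (2, 3)
x_t = tf.matrix_transpose(x)             # shape (3, 2)

# Rank-3 (batched) input: only the last two dimensions are swapped,
# so every matrix in the batch is transposed independently.
batch = tf.zeros([5, 2, 3])              # shape (5, 2, 3)
batch_t = tf.matrix_transpose(batch)     # shape (5, 3, 2)

with tf.Session() as sess:
    print(sess.run(x_t))        # [[1 4] [2 5] [3 6]]
    print(batch_t.get_shape())  # (5, 3, 2)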

Example 1: _sample_conditional

def _sample_conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False, num_samples=None):
    """
    `sample_conditional` will return a sample from the conditional distribution.
    In most cases this means calculating the conditional mean m and variance v and then
    returning m + sqrt(v) * eps, with eps ~ N(0, 1).
    However, for some combinations of Mok and Mof, more efficient sampling routines exist.
    The dispatcher will make sure that we use the most efficient one.

    :return: samples, mean, cov
        samples has shape [num_samples, N, P] or [N, P] if num_samples is None
        mean and cov as for conditional()
    """
    if full_cov and full_output_cov:
        raise NotImplementedError("The combination of both full_cov and full_output_cov is not "
                                  "implemented for sample_conditional.")

    logger.debug("sample conditional: InducingFeature Kernel")
    mean, cov = conditional(Xnew, feat, kern, f, q_sqrt=q_sqrt, white=white,
                            full_cov=full_cov, full_output_cov=full_output_cov)
    if full_cov:
        # mean: N x P
        # cov: P x N x N
        mean = tf.matrix_transpose(mean)  # now P x N
        samples = _sample_mvn(mean, cov, 'full', num_samples=num_samples)  # (S x) P x N
        samples = tf.matrix_transpose(samples)  # now (S x) N x P

    else:
        cov_structure = "full" if full_output_cov else "diag"
        samples = _sample_mvn(mean, cov, cov_structure, num_samples=num_samples)  # [(S,), N, P]

    return samples, mean, cov
Author: sanket-kamthe | Project: GPflow | Lines: 31 | Source: conditionals.py
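The two transposes here are pure shape bookkeeping: with full_cov=True the covariance has shape P x N x N, so the mean is flipped to P x N to make its leading dimension line up with the covariance batch dimension before sampling. GPflow's _sample_mvn is internal, but a hypothetical sketch of its 'full' case, assuming it draws m + L·eps with L the Cholesky factor of the covariance:

import tensorflow as tf

def sample_mvn_full_sketch(mean, cov, jitter=1e-6):
    # Hypothetical sketch, not GPflow's implementation.
    # mean: P x N, cov: P x N x N -- the layout the transposes above arrange.
    N = tf.shape(mean)[-1]
    eye = jitter * tf.eye(N, batch_shape=tf.shape(cov)[:-2], dtype=cov.dtype)
    chol = tf.cholesky(cov + eye)                              # P x N x N
    eps = tf.random_normal(tf.shape(mean), dtype=mean.dtype)   # P x N
    # m + L @ eps, done per matrix in the batch.
    return mean + tf.squeeze(tf.matmul(chol, eps[..., None]), axis=-1)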


Example 2: _quadrature_expectation

def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points):
    """
    Handling of quadrature expectations for Markov Gaussians (useful for time series).
    Fallback method for missing analytic expectations w.r.t. Markov Gaussians.
    Nota Bene: obj1 is always associated with x_n, whereas obj2 always with x_{n+1}
               if one requires e.g. <x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}), compute the
               transpose and then transpose the result of the expectation
    """
    num_gauss_hermite_points = 40 if num_gauss_hermite_points is None else num_gauss_hermite_points

    warnings.warn("Quadrature is used to calculate the expectation. This means that "
                  "an analytical implementation is not available for the given combination.")

    if obj2 is None:
        eval_func = lambda x: get_eval_func(obj1, feature1)(x)
        mu, cov = p.mu[:-1], p.cov[0, :-1]  # cross covariances are not needed
    elif obj1 is None:
        eval_func = lambda x: get_eval_func(obj2, feature2)(x)
        mu, cov = p.mu[1:], p.cov[0, 1:]  # cross covariances are not needed
    else:
        eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(tf.split(x, 2, 1)[0]) *
                               get_eval_func(obj2, feature2, np.s_[:, None, :])(tf.split(x, 2, 1)[1]))
        mu = tf.concat((p.mu[:-1, :], p.mu[1:, :]), 1)  # Nx2D
        cov_top = tf.concat((p.cov[0, :-1, :, :], p.cov[1, :-1, :, :]), 2)  # NxDx2D
        cov_bottom = tf.concat((tf.matrix_transpose(p.cov[1, :-1, :, :]), p.cov[0, 1:, :, :]), 2)
        cov = tf.concat((cov_top, cov_bottom), 1)  # Nx2Dx2D

    return mvnquad(eval_func, mu, cov, num_gauss_hermite_points)
Author: vincentadam87 | Project: GPflow | Lines: 28 | Source: expectations.py
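The final concatenations assemble the joint covariance of two consecutive states: the lower-left block is the transpose of the upper-right cross-covariance, which is what tf.matrix_transpose supplies. An illustrative NumPy restatement with made-up values:

import numpy as np

D = 2
Sigma_n   = np.eye(D)              # cov(x_n),          D x D
Sigma_np1 = 2.0 * np.eye(D)        # cov(x_{n+1}),      D x D
C         = np.full((D, D), 0.3)   # cov(x_n, x_{n+1}), D x D

top    = np.concatenate([Sigma_n, C], axis=1)       # D x 2D
bottom = np.concatenate([C.T, Sigma_np1], axis=1)   # D x 2D
joint  = np.concatenate([top, bottom], axis=0)      # 2D x 2D, symmetric
assert np.allclose(joint, joint.T)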


Example 3: testNonBatchMatrix

 def testNonBatchMatrix(self):
   matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
   expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
   with self.test_session():
     transposed = tf.matrix_transpose(matrix)
     self.assertEqual((3, 2), transposed.get_shape())
     self.assertAllEqual(expected_transposed, transposed.eval())
Author: Qstar | Project: tensorflow | Lines: 7 | Source: array_ops_test.py


Example 4: testNonBatchMatrixDynamicallyDefined

 def testNonBatchMatrixDynamicallyDefined(self):
     matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
     expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
     with self.test_session():
         matrix_ph = tf.placeholder(tf.int32)
         transposed = tf.matrix_transpose(matrix_ph)
         self.assertAllEqual(expected_transposed, transposed.eval(feed_dict={matrix_ph: matrix}))
Author: ppwwyyxx | Project: tensorflow | Lines: 7 | Source: array_ops_test.py


Example 5: _dot

 def _dot(self, slist1, slist2, tf_embs):
     """
     Simple dot product between two vectors of embeddings.
     This returns a matrix of positive real numbers.
     """
     matlist1 = tf.gather(tf_embs, slist1, name='matlist1')
     matlist2 = tf.matrix_transpose(tf.gather(tf_embs, slist2, name='matlist2'))
     return tf.batch_matmul(matlist1, matlist2)
Author: beckdaniel | Project: flakes | Lines: 8 | Source: sk_tf_batch.py


Example 6: _expectation

def _expectation(p, kern, feat, mean, none, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <K_{Z, x_n} m(x_n)>_p(x_n)
    or the equivalent for MarkovGaussian

    :return: NxMxQ
    """
    return tf.matrix_transpose(expectation(p, mean, (kern, feat), nghp=nghp))
Author: vincentadam87 | Project: GPflow | Lines: 9 | Source: expectations.py


Example 7: _conditional

def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False):
    """
    Most efficient routine to project L independent latent GPs through a mixing matrix W.
    The mixing matrix is a member of the `SeparateMixedMok` and has shape P x L.

    The covariance matrices used to calculate the conditional have the following shape:
    - Kuu: L x M x M
    - Kuf: L x M x N
    - Kff: L x N or L x N x N

    Further reference
    -----------------
    - See `gpflow.conditionals._conditional` for a detailed explanation of
      conditional in the single-output case.
    - See the multioutput notebook for more information about the multioutput framework.

    """
    logger.debug("conditional: (MixedKernelSharedMof, MixedKernelSeparateMof), SeparateMixedMok")
    independent_cond = conditional.dispatch(object, SeparateIndependentMof, SeparateIndependentMok, object)
    gmu, gvar = independent_cond(Xnew, feat, kern, f, full_cov=full_cov, q_sqrt=q_sqrt,
                                 full_output_cov=False, white=white)  # N x L, L x N x N or N x L

    gmu = tf.matrix_transpose(gmu)  # L x N
    if not full_cov:
        gvar = tf.matrix_transpose(gvar)  # L x N (x N)

    Wgmu = tf.tensordot(gmu, kern.W, [[0], [1]])  # N x P

    if full_output_cov:
        Wt_expanded = tf.matrix_transpose(kern.W)[:, None, :]  # L x 1 x P
        if full_cov:
            Wt_expanded = tf.expand_dims(Wt_expanded, axis=-1)  # L x 1 x P x 1

        gvarW = tf.expand_dims(gvar, axis=2) * Wt_expanded  # L x N x P (x N)
        WgvarW = tf.tensordot(gvarW, kern.W, [[0], [1]])  # N x P (x N) x P
    else:
        if not full_cov:
            WgvarW = tf.tensordot(gvar, kern.W ** 2, [[0], [1]])  # N x P
        else:
            WgvarW = tf.tensordot(kern.W ** 2, gvar, [[1], [0]])  # P x N (x N)

    return Wgmu, WgvarW
Author: sanket-kamthe | Project: GPflow | Lines: 42 | Source: conditionals.py
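The tensordot contractions apply the mixing matrix point-wise over the latent dimension; a small NumPy check of the mean case, with dimensions invented for illustration:

import numpy as np

L, N, P = 3, 4, 2
gmu = np.random.randn(L, N)     # independent latent means, L x N
W = np.random.randn(P, L)       # mixing matrix, P x L

Wgmu = np.tensordot(gmu, W, axes=[[0], [1]])   # N x P
# Per data point n this is just the matrix-vector product W @ gmu[:, n]:
assert np.allclose(Wgmu[0], W @ gmu[:, 0])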


Example 8: testBatchMatrix

 def testBatchMatrix(self):
   matrix_0 = [[1, 2, 3], [4, 5, 6]]
   matrix_0_t = [[1, 4], [2, 5], [3, 6]]
   matrix_1 = [[11, 22, 33], [44, 55, 66]]
   matrix_1_t = [[11, 44], [22, 55], [33, 66]]
   batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
   expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
   with self.test_session():
     transposed = tf.matrix_transpose(batch_matrix)
     self.assertEqual((2, 3, 2), transposed.get_shape())
     self.assertAllEqual(expected_transposed, transposed.eval())
Author: Qstar | Project: tensorflow | Lines: 11 | Source: array_ops_test.py


Example 9: testBatchMatrixDynamicallyDefined

 def testBatchMatrixDynamicallyDefined(self):
     matrix_0 = [[1, 2, 3], [4, 5, 6]]
     matrix_0_t = [[1, 4], [2, 5], [3, 6]]
     matrix_1 = [[11, 22, 33], [44, 55, 66]]
     matrix_1_t = [[11, 44], [22, 55], [33, 66]]
     batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
     expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
     with self.test_session():
         batch_matrix_ph = tf.placeholder(tf.int32)
         transposed = tf.matrix_transpose(batch_matrix_ph)
         self.assertAllEqual(expected_transposed, transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))
Author: ppwwyyxx | Project: tensorflow | Lines: 11 | Source: array_ops_test.py


Example 10: _scaled_square_dist

    def _scaled_square_dist(self, X, X2):
        """
        Returns ((X - X2ᵀ)/lengthscales)².
        Due to the implementation and floating-point imprecision, the
        result may actually be very slightly negative for entries very
        close to each other.
        """
        X = X / self.lengthscales
        Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)

        if X2 is None:
            dist = -2 * tf.matmul(X, X, transpose_b=True)
            dist += Xs + tf.matrix_transpose(Xs)
            return dist

        X2 = X2 / self.lengthscales
        X2s = tf.reduce_sum(tf.square(X2), axis=-1, keepdims=True)
        dist = -2 * tf.matmul(X, X2, transpose_b=True)
        dist += Xs + tf.matrix_transpose(X2s)
        return dist
Author: sanket-kamthe | Project: GPflow | Lines: 20 | Source: kernels.py
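The kernel relies on the identity ‖x − x′‖² = ‖x‖² − 2xᵀx′ + ‖x′‖², with the two squared-norm column vectors broadcast against each other (hence the matrix_transpose on the second one). A quick NumPy sanity check of the expansion:

import numpy as np

X  = np.random.randn(4, 3)
X2 = np.random.randn(5, 3)

Xs  = np.sum(X**2,  axis=-1, keepdims=True)    # (4, 1)
X2s = np.sum(X2**2, axis=-1, keepdims=True)    # (5, 1)
expanded = -2 * X @ X2.T + Xs + X2s.T          # (4, 5) pairwise distances

direct = np.sum((X[:, None, :] - X2[None, :, :])**2, axis=-1)
assert np.allclose(expanded, direct)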


Example 11: create

  def create(self,
             fixed_embeddings,
             linked_embeddings,
             context_tensor_arrays,
             attention_tensor,
             during_training,
             stride=None):
    """Requires |stride|; otherwise see base class."""
    check.NotNone(stride,
                  'BiaffineDigraphNetwork requires "stride" and must be called '
                  'in the bulk feature extractor component.')

    # TODO(googleuser): Add dropout during training.
    del during_training

    # Retrieve (possibly averaged) weights.
    weights_arc = self._component.get_variable('weights_arc')
    weights_source = self._component.get_variable('weights_source')
    root = self._component.get_variable('root')

    # Extract the source and target token activations.  Use |stride| to collapse
    # batch and beam into a single dimension.
    sources = network_units.lookup_named_tensor('sources', linked_embeddings)
    targets = network_units.lookup_named_tensor('targets', linked_embeddings)
    source_tokens_bxnxs = tf.reshape(sources.tensor,
                                     [stride, -1, self._source_dim])
    target_tokens_bxnxt = tf.reshape(targets.tensor,
                                     [stride, -1, self._target_dim])
    num_tokens = tf.shape(source_tokens_bxnxs)[1]

    # Compute the arc, source, and root potentials.
    arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens(
        source_tokens_bxnxs, target_tokens_bxnxt, weights_arc)
    sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens(
        source_tokens_bxnxs, weights_source)
    roots_bxn = digraph_ops.RootPotentialsFromTokens(
        root, target_tokens_bxnxt, weights_arc, weights_source)

    # Combine them into a single matrix with the roots on the diagonal.
    adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials(
        arcs_bxnxn + sources_bxnxn, roots_bxn)

    # The adjacency matrix currently has sources on rows and targets on columns,
    # but we want targets on rows so that maximizing within a row corresponds to
    # selecting sources for a given target.
    adjacency_bxnxn = tf.matrix_transpose(adjacency_bxnxn)

    return [tf.reshape(adjacency_bxnxn, [-1, num_tokens])]
Author: ALISCIFP | Project: models | Lines: 48 | Source: biaffine_units.py
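After the transpose, row t of the adjacency matrix scores every candidate source for target t, so a row-wise argmax picks one head per token. A toy illustration with invented potentials:

import tensorflow as tf

# Toy 1 x 3 x 3 potentials: entry [b, s, t] scores source s for target t.
scores_src_by_tgt = tf.constant([[[1., 9., 2.],
                                  [3., 4., 8.],
                                  [7., 5., 6.]]])
scores_tgt_by_src = tf.matrix_transpose(scores_src_by_tgt)
# Row t now scores every candidate source for target t, so a row-wise
# argmax predicts one head per token.
heads = tf.argmax(scores_tgt_by_src, axis=-1)
with tf.Session() as sess:
    print(sess.run(heads))  # [[2 0 1]]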


Example 12: _updated_mat

  def _updated_mat(self, mat, v, diag):
    # Get dense matrix defined by its square root, which is an update of `mat`:
    # A = (mat + v D v^T) (mat + v D v^T)^T
    # D is the diagonal matrix with `diag` on the diagonal.

    # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
    if diag is None:
      diag_vt = tf.matrix_transpose(v)
    else:
      diag_mat = tf.matrix_diag(diag)
      diag_vt = tf.matmul(diag_mat, v, adjoint_b=True)

    v_diag_vt = tf.matmul(v, diag_vt)
    sqrt = mat + v_diag_vt
    a = tf.matmul(sqrt, sqrt, adjoint_b=True)
    return a.eval()
Author: ComeOnGetMe | Project: tensorflow | Lines: 16 | Source: operator_pd_vdvt_update_test.py
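In NumPy terms, the graph above computes A = (mat + v·D·vᵀ)(mat + v·D·vᵀ)ᵀ; a direct restatement with toy values:

import numpy as np

mat  = np.diag([2.0, 3.0])          # base square root, 2 x 2
v    = np.array([[1.0], [0.0]])     # update direction, 2 x 1
diag = np.array([0.5])              # entries of D

sqrt = mat + v @ np.diag(diag) @ v.T    # mat + v D v^T
a    = sqrt @ sqrt.T                    # the dense matrix the test evaluates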


Example 13: _arccosine

 def _arccosine(self, slist1, slist2, tf_embs):
     """
     Uses an arccosine kernel of degree 0 to calculate
     the similarity matrix between two vectors of embeddings. 
     This is just cosine similarity projected into the [0,1] interval.
     """
     dot = self._dot(slist1, slist2, tf_embs)
     # This calculation corresponds to an arc-cosine with 
     # degree 0. It can be interpreted as cosine
     # similarity but projected into a [0,1] interval.
     # TODO: arc-cosine with degree 1.
     tf_pi = tf.constant(np.pi, dtype=tf.float64)
     tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')
     normlist1 = tf.gather(tf_norms, slist1, name='normlist1')
     normlist2 = tf.matrix_transpose(tf.gather(tf_norms, slist2, name='normlist2'))
     norms = tf.batch_matmul(normlist1, normlist2)
     cosine = tf.clip_by_value(tf.truediv(dot, norms), -1, 1)
     angle = tf.acos(cosine)
     angle = tf.select(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)
     return 1 - (angle / tf_pi)
Author: beckdaniel | Project: flakes | Lines: 20 | Source: sk_tf_batch.py
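The final mapping 1 − θ/π sends parallel vectors to 1, orthogonal vectors to 0.5, and opposite vectors to 0. A scalar NumPy version makes this easy to verify:

import numpy as np

def arccos0(u, v):
    # Degree-0 arc-cosine similarity: cosine similarity mapped into [0, 1].
    cos = np.clip(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)), -1.0, 1.0)
    return 1.0 - np.arccos(cos) / np.pi

print(arccos0(np.array([1., 0.]), np.array([1., 0.])))   # 1.0  (parallel)
print(arccos0(np.array([1., 0.]), np.array([0., 1.])))   # 0.5  (orthogonal)
print(arccos0(np.array([1., 0.]), np.array([-1., 0.])))  # 0.0  (opposite)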


Example 14: K

    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)

        X_denominator = tf.sqrt(self._weighted_product(X))
        if X2 is None:
            X2 = X
            X2_denominator = X_denominator
        else:
            X2_denominator = tf.sqrt(self._weighted_product(X2))

        numerator = self._weighted_product(X, X2)
        X_denominator = tf.expand_dims(X_denominator, -1)
        X2_denominator = tf.matrix_transpose(tf.expand_dims(X2_denominator, -1))
        cos_theta = numerator / X_denominator / X2_denominator
        jitter = 1e-15
        theta = tf.acos(jitter + (1 - 2 * jitter) * cos_theta)

        return self.variance * (1. / np.pi) * self._J(theta) * \
               X_denominator ** self.order * \
               X2_denominator ** self.order
Author: sanket-kamthe | Project: GPflow | Lines: 21 | Source: kernels.py


Example 15: _validate_correlationness

 def _validate_correlationness(self, x):
   if not self.validate_args:
     return x
   checks = [
       tf.assert_less_equal(
           tf.cast(-1., dtype=x.dtype.base_dtype),
           x,
           message='Correlations must be >= -1.'),
       tf.assert_less_equal(
           x,
           tf.cast(1., x.dtype.base_dtype),
           message='Correlations must be <= 1.'),
       tf.assert_near(
           tf.matrix_diag_part(x),
           tf.cast(1., x.dtype.base_dtype),
           message='Self-correlations must be = 1.'),
       tf.assert_near(
           x, tf.matrix_transpose(x),
           message='Correlation matrices must be symmetric')
   ]
   with tf.control_dependencies(checks):
     return tf.identity(x)
Author: asudomoeva | Project: probability | Lines: 22 | Source: lkj.py


Example 16: _uniform_correlation_like_matrix

def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed):
  """Returns a uniformly random `Tensor` of "correlation-like" matrices.

  A "correlation-like" matrix is a symmetric square matrix with all entries
  between -1 and 1 (inclusive) and 1s on the main diagonal.  Of these,
  the ones that are positive semi-definite are exactly the correlation
  matrices.

  Args:
    num_rows: Python `int` dimension of the correlation-like matrices.
    batch_shape: `Tensor` or Python `tuple` of `int` shape of the
      batch to return.
    dtype: `dtype` of the `Tensor` to return.
    seed: Random seed.

  Returns:
    matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]`
      and dtype `dtype`.  Each entry is in [-1, 1], and each matrix
      along the bottom two dimensions is symmetric and has 1s on the
      main diagonal.
  """
  num_entries = num_rows * (num_rows + 1) // 2  # integer division keeps the shape an int
  ones = tf.ones(shape=[num_entries], dtype=dtype)
  # It seems wasteful to generate random values for the diagonal since
  # I am going to throw them away, but `fill_triangular` fills the
  # diagonal, so I probably need them.
  # It's not impossible that it would be more efficient to just fill
  # the whole matrix with random values instead of messing with
  # `fill_triangular`.  Then would need to filter almost half out with
  # `matrix_band_part`.
  unifs = uniform.Uniform(-ones, ones).sample(batch_shape, seed=seed)
  tril = util.fill_triangular(unifs)
  symmetric = tril + tf.matrix_transpose(tril)
  diagonal_ones = tf.ones(
      shape=util.pad(batch_shape, axis=0, back=True, value=num_rows),
      dtype=dtype)
  return tf.matrix_set_diag(symmetric, diagonal_ones)
Author: asudomoeva | Project: probability | Lines: 37 | Source: correlation_matrix_volumes_lib.py
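The tril + trilᵀ trick yields a symmetric matrix whose diagonal entries are doubled, which is why matrix_set_diag then overwrites the diagonal with ones; the same steps in NumPy:

import numpy as np

tril = np.array([[ 0.2,  0.0,  0.0],
                 [-0.5,  0.9,  0.0],
                 [ 0.1, -0.3,  0.4]])
symmetric = tril + tril.T          # symmetric, but the diagonal is doubled
np.fill_diagonal(symmetric, 1.0)   # matrix_set_diag's job in the graph above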


Example 17: lanczos_bidiag


#......... some code omitted here .........
        clear_after_read=False)

  # Reads a row-vector at location i in tarray and returns it as a
  # column-vector.
  def read_colvec(tarray, i):
    return tf.expand_dims(tarray.read(i), -1)

  # Writes a column-vector as a row-vector at location i in tarray.
  def write_colvec(tarray, colvec, i):
    return tarray.write(i, tf.squeeze(colvec))

  # Ephemeral class holding Lanczos bidiagonalization state:
  #   u = left Lanczos vectors
  #   v = right Lanczos vectors
  #   alpha = diagonal of B_k.
  #   beta = subdiagonal of B_k.
  # Notice that we store the left and right Lanczos vectors as the _rows_
  # of u and v. This is done because tensors are stored row-major and
  # TensorArray only supports packing along dimension 0.
  lanzcos_bidiag_state = collections.namedtuple("LanczosBidiagState",
                                                ["u", "v", "alpha", "beta"])

  def update_state(old, i, u, v, alpha, beta):
    return lanzcos_bidiag_state(
        write_colvec(old.u, u, i + 1),
        write_colvec(old.v, v, i),
        old.alpha.write(i, alpha),
        old.beta.write(i, beta))

  def gram_schmidt_step(j, basis, v):
    """Makes v orthogonal to the j'th vector in basis."""
    v_shape = v.get_shape()
    basis_vec = read_colvec(basis, j)
    v -= tf.batch_matmul(basis_vec, v, adj_x=True) * basis_vec
    v.set_shape(v_shape)
    return j + 1, basis, v

  def orthogonalize_once(i, basis, v):
    j = tf.constant(0, dtype=tf.int32)
    _, _, v = tf.while_loop(lambda j, basis, v: j < i, gram_schmidt_step,
                            [j, basis, v])
    return util.l2normalize(v)

  # Iterated modified Gram-Schmidt orthogonalization adapted from PROPACK.
  # TODO(rmlarsen): This is possibly the slowest implementation of
  # iterated Gram-Schmidt orthogonalization since the abacus. Move to C++.
  def orthogonalize_(i, basis, v):
    v_norm = util.l2norm(v)
    v_new, v_new_norm = orthogonalize_once(i, basis, v)
    # If the norm decreases more than 1/sqrt(2), run a second
    # round of MGS. See proof in:
    #   B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
    #   Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
    return tf.cond(v_new_norm < 0.7071 * v_norm,
                   lambda: orthogonalize_once(i, basis, v),
                   lambda: (v_new, v_new_norm))

  def stopping_criterion(i, _):
    # TODO(rmlarsen): Stop if an invariant subspace is detected.
    return i < k

  def lanczos_bidiag_step(i, ls):
    """Extends the Lanczos bidiagonalization ls by one step."""
    u = read_colvec(ls.u, i)
    r = operator.apply_adjoint(u)
    # The shape inference doesn't work across cond, save and reapply the shape.
    r_shape = r.get_shape()
    r = tf.cond(
        i > 0,
        lambda: r - ls.beta.read(i - 1) * read_colvec(ls.v, i - 1),
        lambda: r)
    r.set_shape(r_shape)
    if orthogonalize:
      v, alpha = orthogonalize_(i - 1, ls.v, r)
    else:
      v, alpha = util.l2normalize(r)
    p = operator.apply(v) - alpha * u
    if orthogonalize:
      u, beta = orthogonalize_(i, ls.u, p)
    else:
      u, beta = util.l2normalize(p)

    return i + 1, update_state(ls, i, u, v, alpha, beta)

  with tf.name_scope(name):
    dtype = operator.dtype
    if starting_vector is None:
      starting_vector = tf.random_uniform(
          operator.shape[:1], -1, 1, dtype=dtype)
    u0, _ = util.l2normalize(starting_vector)
    ls = lanzcos_bidiag_state(
        u=write_colvec(tarray(k + 1, dtype, "u"), u0, 0),
        v=tarray(k, dtype, "v"),
        alpha=tarray(k, dtype, "alpha"),
        beta=tarray(k, dtype, "beta"))
    i = tf.constant(0, dtype=tf.int32)
    _, ls = tf.while_loop(stopping_criterion, lanczos_bidiag_step, [i, ls])
    return lanzcos_bidiag_state(
        tf.matrix_transpose(ls.u.pack()),
        tf.matrix_transpose(ls.v.pack()), ls.alpha.pack(), ls.beta.pack())
Author: tonydeep | Project: tensorflow | Lines: 101 | Source: lanczos.py
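The heart of orthogonalize_once is a single modified Gram-Schmidt pass; below is a minimal NumPy sketch of that pass together with the ~1/√2 re-run criterion, assuming unit-norm basis vectors (which the Lanczos recurrence maintains):

import numpy as np

def mgs_pass(basis, v):
    # Remove v's component along each unit-norm basis vector, then renormalize.
    for b in basis:
        v = v - (b @ v) * b
    norm = np.linalg.norm(v)
    return v / norm, norm

def orthogonalize(basis, v):
    v_norm = np.linalg.norm(v)
    v_new, v_new_norm = mgs_pass(basis, v)
    # If cancellation ate more than ~1/sqrt(2) of the norm, run a second pass,
    # mirroring the 0.7071 test in orthogonalize_ above.
    if v_new_norm < 0.7071 * v_norm:
        v_new, v_new_norm = mgs_pass(basis, v_new)
    return v_new, v_new_norm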


Example 18: gen_decoder

def gen_decoder(hparams,
                inputs,
                targets,
                targets_present,
                encoding_state,
                is_training,
                is_validating,
                reuse=None):
  """Define the Decoder graph. The Decoder will now impute tokens that
      have been masked from the input sequence.
  """
  config = get_config()
  gen_decoder_rnn_size = hparams.gen_rnn_size

  if FLAGS.seq2seq_share_embedding:
    with tf.variable_scope('decoder/rnn', reuse=True):
      embedding = tf.get_variable('embedding',
                                  [FLAGS.vocab_size, gen_decoder_rnn_size])

  with tf.variable_scope('decoder', reuse=reuse):
    # Neural architecture search cell.
    cell = custom_cell.Alien(config.hidden_size)

    if is_training:
      [h2h_masks, _, _,
       output_mask] = variational_dropout.generate_variational_dropout_masks(
           hparams, config.keep_prob)
    else:
      output_mask = None

    cell_gen = custom_cell.GenericMultiRNNCell([cell] * config.num_layers)

    # Hidden encoder states.
    hidden_vector_encodings = encoding_state[0]

    # Carry forward the final state tuple from the encoder.
    # State tuples.
    state_gen = encoding_state[1]

    if FLAGS.attention_option is not None:
      (attention_keys, attention_values, _,
       attention_construct_fn) = attention_utils.prepare_attention(
           hidden_vector_encodings,
           FLAGS.attention_option,
           num_units=gen_decoder_rnn_size,
           reuse=reuse)

    with tf.variable_scope('rnn'):
      sequence, logits, log_probs = [], [], []

      if not FLAGS.seq2seq_share_embedding:
        embedding = tf.get_variable('embedding',
                                    [FLAGS.vocab_size, gen_decoder_rnn_size])
      softmax_w = tf.matrix_transpose(embedding)
      softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])

      rnn_inputs = tf.nn.embedding_lookup(embedding, inputs)

      if is_training and FLAGS.keep_prob < 1:
        rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob)

      for t in xrange(FLAGS.sequence_length):
        if t > 0:
          tf.get_variable_scope().reuse_variables()

        # Input to the Decoder.
        if t == 0:
          # Always provide the real input at t = 0.
          rnn_inp = rnn_inputs[:, t]

        # If the input is present, read in the input at t.
        # If the input is not present, read in the previously generated.
        else:
          real_rnn_inp = rnn_inputs[:, t]
          fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake)

          # While validating, the decoder should be operating in teacher
          # forcing regime.  Also, if we're just training with cross_entropy
          # use teacher forcing.
          if is_validating or (is_training and
                               FLAGS.gen_training_strategy == 'cross_entropy'):
            rnn_inp = real_rnn_inp
          else:
            rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp,
                               fake_rnn_inp)

        if is_training:
          state_gen = list(state_gen)
          for layer_num, per_layer_state in enumerate(state_gen):
            per_layer_state = LSTMTuple(
                per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num])
            state_gen[layer_num] = per_layer_state

        # RNN.
        rnn_out, state_gen = cell_gen(rnn_inp, state_gen)

        if is_training:
          rnn_out = output_mask * rnn_out

        if FLAGS.attention_option is not None:
#......... some code omitted here .........
Author: ALISCIFP | Project: models | Lines: 101 | Source: seq2seq_nas.py
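Note the weight tying: softmax_w = tf.matrix_transpose(embedding) reuses the input embedding as the output projection, so one vocab_size x hidden matrix serves both lookup and logits. A minimal sketch of just that idiom, with invented dimensions (the real sizes come from FLAGS and hparams):

import tensorflow as tf

vocab_size, hidden, batch = 1000, 128, 32
embedding = tf.get_variable('embedding', [vocab_size, hidden])

softmax_w = tf.matrix_transpose(embedding)           # hidden x vocab_size
softmax_b = tf.get_variable('softmax_b', [vocab_size])

rnn_out = tf.zeros([batch, hidden])                  # stand-in for the RNN output
logits = tf.matmul(rnn_out, softmax_w) + softmax_b   # batch x vocab_size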


Example 19: testMultivariateFromScalarBatchScalarEvent

  def testMultivariateFromScalarBatchScalarEvent(self):
    with self.test_session() as sess:
      shift = np.array([-1, 0, 1], dtype=np.float32)
      scale = la.LinearOperatorTriL(
          [[[-1., 0, 0],
            [2, 1, 0],
            [3, 2, 1]],
           [[2, 0, 0],
            [3, -2, 0],
            [4, 3, 2]]],
          is_non_singular=True,
          is_positive_definite=False)

      # Overriding shapes must be compatible w/bijector; most bijectors are
      # batch_shape agnostic and only care about event_ndims.
      # In the case of `Affine`, if we got it wrong then it would fire an
      # exception due to incompatible dimensions.
      fake_mvn = ds.TransformedDistribution(
          distribution=ds.Normal(mu=0., sigma=1.),
          bijector=bs.AffineLinearOperator(shift, scale),
          batch_shape=scale.batch_shape,               # [2]
          event_shape=[scale.domain_dimension.value],  # [3]
          validate_args=True)

      # Note: Affine elided this tile.
      actual_mean = np.tile(shift, [2, 1])
      # Since LinOp.apply doesn't support `adjoint_b` nor composition,
      # we cannot do: scale.apply(scale, adjoint_b=True).eval()
      actual_cov = scale.apply(tf.matrix_transpose(scale.to_dense())).eval()

      actual_mvn = ds.MultivariateNormalFull(mu=actual_mean, sigma=actual_cov)

      # Ensure sample works by checking first, second moments.
      n = 5e3
      y = fake_mvn.sample(int(n), seed=0)
      sample_mean = tf.reduce_mean(y, 0)
      centered_y = tf.transpose(y - sample_mean, [1, 2, 0])
      sample_cov = tf.matmul(centered_y, centered_y, transpose_b=True) / n
      [sample_mean_, sample_cov_] = sess.run([sample_mean, sample_cov])
      self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1)
      self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1)

      # Ensure all other functions work as intended.
      x = fake_mvn.sample(5, seed=0).eval()
      self.assertAllEqual([5, 2, 3], x.shape)
      self.assertAllEqual(actual_mvn.get_event_shape(),
                          fake_mvn.get_event_shape())
      self.assertAllEqual(actual_mvn.event_shape().eval(),
                          fake_mvn.event_shape().eval())
      self.assertAllEqual(actual_mvn.get_batch_shape(),
                          fake_mvn.get_batch_shape())
      self.assertAllEqual(actual_mvn.batch_shape().eval(),
                          fake_mvn.batch_shape().eval())
      self.assertAllClose(actual_mvn.log_prob(x).eval(),
                          fake_mvn.log_prob(x).eval(),
                          atol=0., rtol=1e-7)
      self.assertAllClose(actual_mvn.prob(x).eval(),
                          fake_mvn.prob(x).eval(),
                          atol=0., rtol=1e-6)
      self.assertAllClose(actual_mvn.entropy().eval(),
                          fake_mvn.entropy().eval(),
                          atol=0., rtol=1e-6)
      for unsupported_fn in (fake_mvn.log_cdf,
                             fake_mvn.cdf,
                             fake_mvn.survival_function,
                             fake_mvn.log_survival_function):
        with self.assertRaisesRegexp(
            NotImplementedError, "not implemented when overriding event_shape"):
          unsupported_fn(x)
Author: BinRoot | Project: Tensorflow | Lines: 69 | Source: transformed_distribution_test.py


Example 20: testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix

 def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
   vector = [1, 2, 3]
   with self.test_session():
     with self.assertRaisesRegexp(ValueError, "should be a "):
       tf.matrix_transpose(vector)
Author: Qstar | Project: tensorflow | Lines: 5 | Source: array_ops_test.py



Note: The tensorflow.matrix_transpose examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.

