Python tensorflow.matrix_band_part Function Code Examples


This article collects typical usage examples of the tensorflow.matrix_band_part function in Python. If you are wondering what matrix_band_part does, how to call it, or where to find real-world usage, the curated code examples below should help.



The following 20 code examples of matrix_band_part are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
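
Before the collected examples, here is a minimal usage sketch (an editorial addition, not one of the 20 examples below) illustrating the semantics of tf.matrix_band_part(input, num_lower, num_upper): everything outside a band of num_lower sub-diagonals and num_upper super-diagonals is zeroed out, with -1 meaning "keep that entire triangle". It assumes a TensorFlow 1.x environment where tf.matrix_band_part and tf.Session are available, consistent with the examples below.

import tensorflow as tf

x = tf.ones([4, 4])
lower = tf.matrix_band_part(x, -1, 0)  # keep the full lower triangle (causal masks, Cholesky factors)
diag = tf.matrix_band_part(x, 0, 0)    # keep only the main diagonal
band = tf.matrix_band_part(x, 1, 1)    # keep a tridiagonal band: one sub- and one super-diagonal

with tf.Session() as sess:
    print(sess.run(lower))  # [[1,0,0,0],[1,1,0,0],[1,1,1,0],[1,1,1,1]]
    print(sess.run(diag))
    print(sess.run(band))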

Example 1: _random_cholesky_array

 def _random_cholesky_array(self, shape):
   mat = self._rng.rand(*shape)
   chol = distribution_util.matrix_diag_transform(
       mat, transform=tf.nn.softplus)
   # Zero the upper triangle because we're using this as a true Cholesky factor
   # in our tests.
   return tf.matrix_band_part(chol, -1, 0).eval()
Author: 821760408-sp, Project: tensorflow, Lines: 7, Source: operator_pd_cholesky_test.py


Example 2: gauss_kl

def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from

          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)

    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL += -0.5*tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # logdet
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Author: GPflow, Project: GPflow, Lines: 30, Source: kullback_leiblers.py
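
Editorial note (not part of the GPflow source): the quantity assembled term by term above is the standard closed-form KL divergence between Gaussians. Per independent latent dimension, with q = N(μ, L_q L_qᵀ), p = N(0, K), K of size d x d and L = cholesky(K):

    KL(q ‖ p) = 0.5 * ( μᵀ K⁻¹ μ + tr(K⁻¹ L_q L_qᵀ) − log det(L_q L_qᵀ) + log det K − d )

The triangular solve alpha = L⁻¹ q_mu yields the Mahalanobis term, the diagonals of L and L_q yield the two log-determinant terms, and the squared Frobenius norm of L⁻¹ L_q yields the trace term; the code sums these contributions over all latent dimensions.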


Example 3: random_tril_matrix

def random_tril_matrix(
    shape, dtype, force_well_conditioned=False, remove_upper=True):
  """[batch] lower triangular matrix.

  Args:
    shape:  `TensorShape` or Python `list`.  Shape of the returned matrix.
    dtype:  `TensorFlow` `dtype` or Python dtype
    force_well_conditioned:  Python `bool`. If `True`, returned matrix will have
      eigenvalues with modulus in `(1, 2)`.  Otherwise, eigenvalues are unit
      normal random variables.
    remove_upper:  Python `bool`.
      If `True`, zero out the strictly upper triangle.
      If `False`, the lower triangle of returned matrix will have desired
      properties, but will not have the strictly upper triangle zero'd out.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  with tf.name_scope("random_tril_matrix"):
    # Totally random matrix.  Has no nice properties.
    tril = random_normal(shape, dtype=dtype)
    if remove_upper:
      tril = tf.matrix_band_part(tril, -1, 0)

    # Create a diagonal with entries having modulus in [1, 2].
    if force_well_conditioned:
      maxval = tf.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
      diag = random_sign_uniform(
          shape[:-1], dtype=dtype, minval=1., maxval=maxval)
      tril = tf.matrix_set_diag(tril, diag)

    return tril
Author: curtiszimmerman, Project: tensorflow, Lines: 32, Source: linear_operator_test_util.py


Example 4: _operator_and_mat_and_feed_dict

  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]

    # Upper triangle will be ignored.
    # Use a diagonal that ensures this matrix is well conditioned.
    tril = tf.random_normal(shape=shape, dtype=dtype.real_dtype)
    diag = tf.random_uniform(
        shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.)
    if dtype.is_complex:
      tril = tf.complex(
          tril, tf.random_normal(shape, dtype=dtype.real_dtype))
      diag = tf.complex(
          diag, tf.random_uniform(
              shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.))

    tril = tf.matrix_set_diag(tril, diag)

    tril_ph = tf.placeholder(dtype=dtype)

    if use_placeholder:
      # Evaluate the tril here because (i) you cannot feed a tensor, and (ii)
      # tril is random and we want the same value used for both mat and
      # feed_dict.
      tril = tril.eval()
      operator = linalg.LinearOperatorTriL(tril_ph)
      feed_dict = {tril_ph: tril}
    else:
      operator = linalg.LinearOperatorTriL(tril)
      feed_dict = None

    mat = tf.matrix_band_part(tril, -1, 0)

    return operator, mat, feed_dict
Author: Hwhitetooth, Project: tensorflow, Lines: 34, Source: linear_operator_tril_test.py


Example 5: call

    def call(self, x, mask=None):
        x1 ,x2 = x
        outer = tf.matmul(tf.expand_dims(x1, axis=2), tf.expand_dims(x2, axis=1))
        outer = tf.matrix_band_part(outer, 0, self.ans_limit)
        output1 = tf.reshape(tf.cast(tf.argmax(tf.reduce_max(outer, axis=2), axis=1), tf.float32),(-1,1))
        output2 = tf.reshape(tf.cast(tf.argmax(tf.reduce_max(outer, axis=1), axis=1), tf.float32),(-1,1))

        return [output1, output2]
Author: sunlinyu1993, Project: Machine-Learning-Toolbox, Lines: 8, Source: QAoutputBlock.py


Example 6: _forward

 def _forward(self, x):
   if self.validate_args:
     is_matrix = tf.assert_rank_at_least(x, 2)
     shape = tf.shape(x)
     is_square = tf.assert_equal(shape[-2], shape[-1])
     x = control_flow_ops.with_dependencies([is_matrix, is_square], x)
   # For safety, explicitly zero-out the upper triangular part.
   x = tf.matrix_band_part(x, -1, 0)
   return tf.matmul(x, x, adjoint_b=True)
Author: asudomoeva, Project: probability, Lines: 9, Source: cholesky_outer_product.py


Example 7: CheckUnitary

 def CheckUnitary(self, x):
   # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
   xx = tf.matmul(x, x, adjoint_a=True)
   identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
   if is_single:
     tol = 1e-5
   else:
     tol = 1e-14
   self.assertAllClose(identity.eval(), xx.eval(), atol=tol)
Author: lijiankou, Project: tensorflow, Lines: 9, Source: svd_op_test.py


Example 8: _sample_n

  def _sample_n(self, n, seed):
    batch_shape = self.batch_shape_tensor()
    event_shape = self.event_shape_tensor()
    batch_ndims = tf.shape(batch_shape)[0]

    ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
    shape = tf.concat([[n], batch_shape, event_shape], 0)
    stream = seed_stream.SeedStream(seed, salt="Wishart")

    # Complexity: O(nbk**2)
    x = tf.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=stream())

    # Complexity: O(nbk)
    # This parametrization is equivalent to Chi2, i.e.,
    # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
    expanded_df = self.df * tf.ones(
        self.scale_operator.batch_shape_tensor(),
        dtype=self.df.dtype.base_dtype)

    g = tf.random_gamma(
        shape=[n],
        alpha=self._multi_gamma_sequence(0.5 * expanded_df, self.dimension),
        beta=0.5,
        dtype=self.dtype,
        seed=stream())

    # Complexity: O(nbk**2)
    x = tf.matrix_band_part(x, -1, 0)  # Tri-lower.

    # Complexity: O(nbk)
    x = tf.matrix_set_diag(x, tf.sqrt(g))

    # Make batch-op ready.
    # Complexity: O(nbk**2)
    perm = tf.concat([tf.range(1, ndims), [0]], 0)
    x = tf.transpose(x, perm)
    shape = tf.concat([batch_shape, [event_shape[0]], [-1]], 0)
    x = tf.reshape(x, shape)

    # Complexity: O(nbM) where M is the complexity of the operator solving a
    # vector system. For LinearOperatorLowerTriangular, each matmul is O(k^3) so
    # this step has complexity O(nbk^3).
    x = self.scale_operator.matmul(x)

    # Undo make batch-op ready.
    # Complexity: O(nbk**2)
    shape = tf.concat([batch_shape, event_shape, [n]], 0)
    x = tf.reshape(x, shape)
    perm = tf.concat([[ndims - 1], tf.range(0, ndims - 1)], 0)
    x = tf.transpose(x, perm)

    if not self.input_output_cholesky:
      # Complexity: O(nbk**3)
      x = tf.matmul(x, x, adjoint_b=True)

    return x
Author: asudomoeva, Project: probability, Lines: 57, Source: wishart.py


Example 9: get_right_context_mask

def get_right_context_mask(time_steps):
    """ Generates the mask preventing the decoder from attending to unseen positions. """
    # Generate mask that limits decoder self-attention up to and including the current position
    attn_mask = tf.matrix_band_part(tf.ones([time_steps, time_steps]), -1, 0)
    # Expand mask to 4d. so as to be compatible with attention weights
    attn_mask = tf.expand_dims(tf.expand_dims(attn_mask, 0), 0)
    # Illegal connections will be set to -inf when fed into the softmax function
    # Padding for non-masked positions is applied to prevent NaNs
    attn_mask = -1e9 * (1.0 - attn_mask)
    return attn_mask
Author: rsennrich, Project: nematus, Lines: 10, Source: transformer_layers.py


Example 10: Test

 def Test(self):
   shape = batch_shape_ + shape_
   x = tf.constant(np.random.rand(*shape), dtype=dtype_)
   with self.test_session(use_gpu=True):
     for lower in -1, 0, 1, shape_[-2] - 1:
       for upper in -1, 0, 1, shape_[-1] - 1:
         y = tf.matrix_band_part(x, lower, upper)
         error = tf.test.compute_gradient_error(x, x.get_shape().as_list(), y,
                                                y.get_shape().as_list())
         self.assertLess(error, 1e-4)
Author: 821760408-sp, Project: tensorflow, Lines: 10, Source: matrix_band_part_op_test.py


Example 11: mask_leq

def mask_leq(target_length, source_length):
  """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.

  Args:
    target_length: an integer
    source_length: an integer
  Returns:
    a Tensor with shape [1, target_length, source_length]
  """
  return tf.expand_dims(
      tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0)
Author: TrunksLegendary, Project: tensor2tensor, Lines: 11, Source: common_layers.py


Example 12: attention_bias_lower_triangle

def attention_bias_lower_triangle(length):
    """ Create a bias tensor to be added to attention logits.

      Allows a query to attend to all positions up to and including its own.
    Args:
        length: A scalar.

    Returns: A float Tensor of shape [1, 1, length, length], with -1e9 in
      padding positions and 0 in non-padding positions.

    """
    lower_triangle = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
    ret = FLOAT_MIN * (1. - lower_triangle)
    return tf.reshape(ret, [1, 1, length, length])
Author: KIngpon, Project: NJUNMT-tf, Lines: 14, Source: common_attention.py


Example 13: get_decoder_self_attention_bias

def get_decoder_self_attention_bias(length):
  """Calculate bias for decoder that maintains model's autoregressive property.

  Creates a tensor that masks out locations that correspond to illegal
  connections, so prediction at position i cannot draw information from future
  positions.

  Args:
    length: int length of sequences in batch.

  Returns:
    float tensor of shape [1, 1, length, length]
  """
  with tf.name_scope("decoder_self_attention_bias"):
    valid_locs = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
    valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
    decoder_bias = _NEG_INF * (1.0 - valid_locs)
  return decoder_bias
Author: 812864539, Project: models, Lines: 18, Source: model_utils.py


Example 14: _assertions

 def _assertions(self, x):
   if not self.validate_args:
     return []
   x_shape = tf.shape(x)
   is_matrix = tf.assert_rank_at_least(
       x, 2,
       message="Input must have rank at least 2.")
   is_square = tf.assert_equal(
       x_shape[-2], x_shape[-1],
       message="Input must be a square matrix.")
   diag_part_x = tf.matrix_diag_part(x)
   is_lower_triangular = tf.assert_equal(
       tf.matrix_band_part(x, 0, -1),  # Preserves triu, zeros rest.
       tf.matrix_diag(diag_part_x),
       message="Input must be lower triangular.")
   is_positive_diag = tf.assert_positive(
       diag_part_x,
       message="Input must have all positive diagonal entries.")
   return [is_matrix, is_square, is_lower_triangular, is_positive_diag]
Author: lewisKit, Project: probability, Lines: 19, Source: cholesky_to_inv_cholesky.py


Example 15: testNonDefaultsYieldCorrectShapesAndValues

  def testNonDefaultsYieldCorrectShapesAndValues(self):
    batch_shape = [4, 3]
    x_size = 3
    mvn_size = 5
    x_ = np.random.randn(*np.concatenate([batch_shape, [x_size]]))

    x = tf.constant(x_)
    mvn = tfp.trainable_distributions.multivariate_normal_tril(
        x,
        dims=mvn_size,
        loc_fn=tf.zeros_like,
        scale_fn=lambda x: tfd.fill_triangular(tf.ones_like(x)))
    scale = mvn.scale.to_dense()
    expected_scale = tf.matrix_band_part(
        tf.ones(np.concatenate([batch_shape, [mvn_size, mvn_size]]),
                scale.dtype),
        num_lower=-1,
        num_upper=0)

    self.evaluate(tf.global_variables_initializer())
    [
        batch_shape_,
        event_shape_,
        loc_,
        scale_,
        expected_scale_,
    ] = self.evaluate([
        mvn.batch_shape_tensor(),
        mvn.event_shape_tensor(),
        mvn.loc,
        scale,
        expected_scale,
    ])

    self.assertAllEqual(batch_shape, mvn.batch_shape)
    self.assertAllEqual(batch_shape, batch_shape_)

    self.assertAllEqual([mvn_size], mvn.event_shape)
    self.assertAllEqual([mvn_size], event_shape_)

    self.assertAllEqual(np.zeros_like(loc_), loc_)
    self.assertAllEqual(expected_scale_, scale_)
Author: asudomoeva, Project: probability, Lines: 42, Source: trainable_distributions_test.py


Example 16: get_multivariate_gaussian_energy_fn

def get_multivariate_gaussian_energy_fn(x_dim=2):
  """Get energy function for 2d strongly correlated Gaussian."""

  mu = tf.random_normal(shape=[x_dim])
  # Lower triangularize and positive diagonal
  l = tf.sigmoid(
      tf.matrix_band_part(tf.random_normal(shape=[x_dim, x_dim]), -1, 0))
  # Exploit Cholesky decomposition
  sigma = tf.matmul(l, tf.transpose(l))
  sigma *= 100.  # Small covariance causes extreme numerical instability
  sigma_inv = tf.matrix_inverse(sigma)

  def energy(x):
    """Unnormalized log density/energy of 2d strongly correlated Gaussian."""

    xmmu = x - mu
    return .5 * tf.diag_part(
        tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))

  return energy
Author: zakizhou, Project: tensorflow, Lines: 20, Source: l2hmc.py


Example 17: base_conditional

def base_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, q_sqrt=None, white=False):
    # compute kernel stuff
    num_func = tf.shape(f)[1]  # K
    Lm = tf.cholesky(Kmm)

    # Compute the projection matrix A
    A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)

    # compute the covariance due to the conditioning
    if full_cov:
        fvar = Knn - tf.matmul(A, A, transpose_a=True)
        shape = tf.stack([num_func, 1, 1])
    else:
        fvar = Knn - tf.reduce_sum(tf.square(A), 0)
        shape = tf.stack([num_func, 1])
    fvar = tf.tile(tf.expand_dims(fvar, 0), shape)  # K x N x N or K x N

    # another backsubstitution in the unwhitened case
    if not white:
        A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)

    # construct the conditional mean
    fmean = tf.matmul(A, f, transpose_a=True)

    if q_sqrt is not None:
        if q_sqrt.get_shape().ndims == 2:
            LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2)  # K x M x N
        elif q_sqrt.get_shape().ndims == 3:
            L = tf.matrix_band_part(q_sqrt, -1, 0)  # K x M x M
            A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1]))
            LTA = tf.matmul(L, A_tiled, transpose_a=True)  # K x M x N
        else:  # pragma: no cover
            raise ValueError("Bad dimension for q_sqrt: %s" %
                             str(q_sqrt.get_shape().ndims))
        if full_cov:
            fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True)  # K x N x N
        else:
            fvar = fvar + tf.reduce_sum(tf.square(LTA), 1)  # K x N
    fvar = tf.transpose(fvar)  # N x K or N x N x K

    return fmean, fvar
Author: vincentadam87, Project: GPflow, Lines: 41, Source: conditionals.py


Example 18: _operator_and_mat_and_feed_dict

  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    # Upper triangle will be nonzero, but ignored.
    # Use a diagonal that ensures this matrix is well conditioned.
    tril = linear_operator_test_util.random_tril_matrix(
        shape, dtype=dtype, force_well_conditioned=True, remove_upper=False)

    if use_placeholder:
      tril_ph = tf.placeholder(dtype=dtype)
      # Evaluate the tril here because (i) you cannot feed a tensor, and (ii)
      # tril is random and we want the same value used for both mat and
      # feed_dict.
      tril = tril.eval()
      operator = linalg.LinearOperatorTriL(tril_ph)
      feed_dict = {tril_ph: tril}
    else:
      operator = linalg.LinearOperatorTriL(tril)
      feed_dict = None

    mat = tf.matrix_band_part(tril, -1, 0)

    return operator, mat, feed_dict
Author: curtiszimmerman, Project: tensorflow, Lines: 21, Source: linear_operator_tril_test.py


Example 19: _assertions

 def _assertions(self, x):
   if not self.validate_args:
     return []
   shape = tf.shape(x)
   is_matrix = tf.assert_rank_at_least(
       x, 2, message="Input must have rank at least 2.")
   is_square = tf.assert_equal(
       shape[-2], shape[-1], message="Input must be a square matrix.")
   above_diagonal = tf.matrix_band_part(
       tf.matrix_set_diag(x, tf.zeros(shape[:-1], dtype=tf.float32)), 0, -1)
   is_lower_triangular = tf.assert_equal(
       above_diagonal,
       tf.zeros_like(above_diagonal),
       message="Input must be lower triangular.")
   # A lower triangular matrix is nonsingular iff all its diagonal entries are
   # nonzero.
   diag_part = tf.matrix_diag_part(x)
   is_nonsingular = tf.assert_none_equal(
       diag_part,
       tf.zeros_like(diag_part),
       message="Input must have all diagonal entries nonzero.")
   return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
Author: lewisKit, Project: probability, Lines: 22, Source: matrix_inverse_tril.py


Example 20: gauss_kl_white

def gauss_kl_white(q_mu, q_sqrt):
    """
    Compute the KL divergence from

          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, I)

    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance.
    """
    KL = 0.5 * tf.reduce_sum(tf.square(q_mu))  # Mahalanobis term
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    L = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL -= 0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(L))))  # logdet
    KL += 0.5 * tf.reduce_sum(tf.square(L))  # Trace term.
    return KL
Author: GPflow, Project: GPflow, Lines: 22, Source: kullback_leiblers.py



Note: The tensorflow.matrix_band_part examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.

