Python tensorflow.matrix_inverse Function Code Examples


This article collects typical usage examples of the tensorflow.matrix_inverse function in Python. If you are wondering what matrix_inverse does, how to call it, or what its usage looks like in real projects, the curated code examples below may help.



The following section presents 20 code examples of the matrix_inverse function, sorted by popularity by default.
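
Before the examples, here is a minimal, self-contained usage sketch. It is not taken from any of the projects below and assumes a TensorFlow 1.x environment, where tf.matrix_inverse is available (in TensorFlow 2.x the same op is exposed as tf.linalg.inv):

import tensorflow as tf

# A small, clearly invertible 2x2 matrix.
a = tf.constant([[2.0, 1.0],
                 [1.0, 3.0]], dtype=tf.float32)

# Inverse computed by the op this article is about.
a_inv = tf.matrix_inverse(a)

# Sanity check: a @ a_inv should be numerically close to the identity.
identity_check = tf.matmul(a, a_inv)

with tf.Session() as sess:
    print(sess.run(a_inv))
    print(sess.run(identity_check))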

Example 1: hnet_transformation

def hnet_transformation(gt_pts, transformation_coeffcient, name):
    """

    :param gt_pts: original ground-truth point pairs [x, y, 1]
    :param transformation_coeffcient: transform matrix parameters (6-parameter matrix) [[a, b, c], [0, d, e], [0, f, 1]]
    :param name: variable scope name
    :return: the fitted points projected back into the original input space
    """
    with tf.variable_scope(name):
        # First, project the original ground-truth point pairs
        transformation_coeffcient = tf.concat([transformation_coeffcient, [1.0]], axis=-1)
        H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
        H_shape = tf.constant([9])
        H = tf.scatter_nd(H_indices, transformation_coeffcient, H_shape)
        H = tf.reshape(H, shape=[3, 3])

        gt_pts = tf.transpose(gt_pts)
        pts_projects = tf.matmul(H, gt_pts)

        # Solve the least-squares polynomial-fit coefficient matrix (normal equations)
        Y = tf.transpose(pts_projects[1, :])
        X = tf.transpose(pts_projects[0, :])
        Y_One = tf.add(tf.subtract(Y, Y), tf.constant(1.0, tf.float32))
        Y_stack = tf.stack([tf.pow(Y, 3), tf.pow(Y, 2), Y, Y_One], axis=1)
        w = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(Y_stack), Y_stack)),
                                tf.transpose(Y_stack)),
                      tf.expand_dims(X, -1))

        # Use the fitted polynomial coefficients to compute the fitted positions
        x_preds = tf.matmul(Y_stack, w)
        preds = tf.transpose(tf.stack([tf.squeeze(x_preds, -1), Y, Y_One], axis=1))
        preds_fit = tf.stack([tf.squeeze(x_preds, -1), Y], axis=1)
        x_transformation_back = tf.matmul(tf.matrix_inverse(H), preds)

    return x_transformation_back
Author: dandancat123, Project: bilibli_notes2, Lines: 35, Source: lanenet_hnet_loss.py


Example 2: hnet_loss

def hnet_loss(gt_pts, transformation_coeffcient, name):
    """
    
    :param gt_pts: original ground-truth point pairs [x, y, 1]
    :param transformation_coeffcient: transform matrix parameters (6-parameter matrix) [[a, b, c], [0, d, e], [0, f, 1]]
    :param name: variable scope name
    :return: the mean squared fitting loss computed in the original input space
    """
    with tf.variable_scope(name):
        # First, project the original ground-truth point pairs
        transformation_coeffcient = tf.concat([transformation_coeffcient, [1.0]], axis=-1)
        H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
        H_shape = tf.constant([9])
        H = tf.scatter_nd(H_indices, transformation_coeffcient, H_shape)
        H = tf.reshape(H, shape=[3, 3])

        gt_pts = tf.transpose(gt_pts)
        pts_projects = tf.matmul(H, gt_pts)

        # Solve the least-squares polynomial-fit coefficient matrix (normal equations)
        Y = tf.transpose(pts_projects[1, :])
        X = tf.transpose(pts_projects[0, :])
        Y_One = tf.add(tf.subtract(Y, Y), tf.constant(1.0, tf.float32))
        Y_stack = tf.stack([tf.pow(Y, 3), tf.pow(Y, 2), Y, Y_One], axis=1)
        w = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(Y_stack), Y_stack)),
                                tf.transpose(Y_stack)),
                      tf.expand_dims(X, -1))
        # Use the fitted polynomial coefficients to compute the fitted positions, then transform back to the original projection space to compute the loss
        x_preds = tf.matmul(Y_stack, w)
        preds = tf.transpose(tf.stack([tf.squeeze(x_preds, -1), Y, Y_One], axis=1))
        x_transformation_back = tf.matmul(tf.matrix_inverse(H), preds)

        loss = tf.reduce_mean(tf.pow(gt_pts[0, :] - x_transformation_back[0, :], 2))

    return loss
Author: dandancat123, Project: bilibli_notes2, Lines: 35, Source: lanenet_hnet_loss.py
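
Examples 1 and 2 solve the least-squares fit by forming the normal equations and explicitly inverting Y_stack^T * Y_stack. As a rough alternative sketch (not part of the original lanenet_hnet_loss.py), TensorFlow 1.x also provides tf.matrix_solve_ls, which solves the same least-squares problem without materializing an explicit inverse and is usually better conditioned:

# Sketch only: Y_stack and X refer to the tensors built in the examples above.
w = tf.matrix_solve_ls(Y_stack, tf.expand_dims(X, -1), l2_regularizer=0.0)
x_preds = tf.matmul(Y_stack, w)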


Example 3: testNotInvertible

  def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session():
      with self.assertRaisesOpError("Input is not invertible."):
        # All rows of the matrix below add to zero.
        tensor3 = tf.constant([[1., 0., -1.], [-1., 1., 0.], [0., -1., 1.]])
        tf.matrix_inverse(tensor3).eval()
Author: niclar, Project: tensorflow, Lines: 7, Source: matrix_inverse_op_test.py


Example 4: solve

def solve(a, b):
    if b.ndim == 1:
        return tf.reshape(tf.matmul(tf.matrix_inverse(a), tf.expand_dims(b, -1)), [-1])
    elif b.ndim == 2:
        return tf.matmul(tf.matrix_inverse(a), b)
    else:
        import ipdb; ipdb.set_trace()
Author: AliceLane, Project: tensorfuse, Lines: 7, Source: slinalg.py
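
Example 4 computes the solution of a linear system by multiplying with an explicit inverse. A minimal alternative sketch (an illustration, not part of the original slinalg.py) keeps the same interface but calls tf.matrix_solve, which solves a * x = b directly:

def solve_direct(a, b):
    # Hypothetical helper mirroring solve() above, without forming a^{-1}.
    if b.ndim == 1:
        return tf.reshape(tf.matrix_solve(a, tf.expand_dims(b, -1)), [-1])
    return tf.matrix_solve(a, b)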


Example 5: tf_prod_gauss

def tf_prod_gauss(mu1, mu2, sig1, sig2):
	# Product of two Gaussians, working with precisions (inverse covariances).
	prec1 = tf.matrix_inverse(sig1)
	prec2 = tf.matrix_inverse(sig2)
	prec = tf.add(prec1, prec2)
	sig = tf.matrix_inverse(prec)
	# Precision-weighted combination of the two means (prec1*mu1 + prec2*mu2).
	mu = tf.add(tf.matmul(prec1, mu1), tf.matmul(prec2, mu2))

	return mu, sig
Author: verajohne, Project: SEP_autoencoder, Lines: 9, Source: util.py


Example 6: F2_bound

def F2_bound(y,Kmm,Knm,Knn,mu,Sigma):
    Eye=tf.constant(np.eye(N,N), shape=[N,N],dtype=tf.float32)
    sigEye=tf.mul(1.0,Eye)
    print(s.run(tf.matrix_inverse(sigEye)))
    #sigEye=tf.mul(tf.square(sigma),Eye)
    Kmn=tf.transpose(Knm)
    prec=Matrix_Inversion_Lemma(sigEye,Knm,Kmm,Kmn)
    zeros=tf.constant(np.zeros(N),shape=[N,1],dtype=tf.float32)
    log_den=log_density(y,zeros,prec)
    Kmm_inv=tf.matrix_inverse(Kmm)
    trace_term=tf.trace(Knn-Mul(Knm,Kmm_inv,Kmn))*(0.5)
    return log_den-trace_term
Author: blutooth, Project: gp, Lines: 12, Source: maintf.py


Example 7: invertible_1x1_conv

def invertible_1x1_conv(z, logdet, reverse=False, name=None, use_bias=False):
    with tf.variable_scope(name, "invconv"):
        shape = z.get_shape().as_list()
        w_shape = [shape[3], shape[3]]

        # Sample a random orthogonal matrix:
        w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype('float32')
        w = tf.get_variable("W", dtype=tf.float32, initializer=w_init)

        det_w = tf.matrix_determinant(tf.cast(w, 'float64'))
        dlogdet = tf.cast(tf.log(abs(det_w)), 'float32') * shape[1] * shape[2]

        if use_bias:
            b = tf.get_variable("bias", [1, 1, 1, shape[3]])

        if not reverse:
            _w = w[tf.newaxis, tf.newaxis, ...]
            z = tf.nn.conv2d(z, _w, [1, 1, 1, 1], 'SAME', data_format='NHWC')
            logdet += dlogdet
            if use_bias:
                z += b
        else:
            if use_bias:
                z -= b
            w_inv = tf.matrix_inverse(w)
            _w = w_inv[tf.newaxis, tf.newaxis, ...]
            z = tf.nn.conv2d(z, _w, [1, 1, 1, 1], 'SAME', data_format='NHWC')
            logdet -= dlogdet
        return z, logdet
Author: gdahia, Project: DLF, Lines: 29, Source: ops.py


Example 8: get_log_likelihood

	def get_log_likelihood(self):

		I = tf.constant(self.sigx*np.identity(self.dimx))
		mu = tf.constant(np.zeros(self.dimx).reshape((self.dimx,1)))

		ll = tf.constant(0, tf.float64)
		
		n_samples = 1
		for i in xrange(self.n):
			x = self.observed[i]
			x = tf.constant(x, shape = [3,1], dtype = tf.float64)

			mc = tf.Variable(0, dtype = tf.float64)
			for j in xrange(n_samples):
				w = self.sample_theta()
				
				var = tf.matmul(w, tf.transpose(w))

				var = tf.add(var, I)
				prec = tf.matrix_inverse(var)
				pw = util.tf_evaluate_gauss(mu, prec, x)

				mc+= pw

			mc = tf.div(mc,float(n_samples) )
			mc = tf.log(mc)
			ll = tf.add(ll, mc)
		self.llh = ll
		return ll
Author: ml-lab, Project: Autoencoding_SEP, Lines: 29, Source: Fa.py


Example 9: initialize

  def initialize(self, *args, **kwargs):
    # Store latent variables in a temporary attribute; MAP will
    # optimize `PointMass` random variables, which subsequently
    # optimizes mean parameters of the normal approximations.
    latent_vars_normal = self.latent_vars.copy()
    self.latent_vars = {z: PointMass(params=qz.loc)
                        for z, qz in six.iteritems(latent_vars_normal)}

    super(Laplace, self).initialize(*args, **kwargs)

    hessians = tf.hessians(self.loss, list(six.itervalues(self.latent_vars)))
    self.finalize_ops = []
    for z, hessian in zip(six.iterkeys(self.latent_vars), hessians):
      qz = latent_vars_normal[z]
      if isinstance(qz, (MultivariateNormalDiag, Normal)):
        scale_var = get_variables(qz.variance())[0]
        scale = 1.0 / tf.diag_part(hessian)
      else:  # qz is MultivariateNormalTriL
        scale_var = get_variables(qz.covariance())[0]
        scale = tf.matrix_inverse(tf.cholesky(hessian))

      self.finalize_ops.append(scale_var.assign(scale))

    self.latent_vars = latent_vars_normal.copy()
    del latent_vars_normal
Author: wujsAct, Project: edward, Lines: 25, Source: laplace.py


Example 10: inverse

def inverse(pBatch,opt,name=None):
	with tf.name_scope("inverse"):
		pMtrxBatch = vec2mtrxBatch(pBatch,opt)
		pInvMtrxBatch = tf.matrix_inverse(pMtrxBatch)
		pInvBatch = mtrx2vecBatch(pInvMtrxBatch,opt)
		pInvBatch = tf.identity(pInvBatch,name=name)
	return pInvBatch
Author: sunshinezhe, Project: IC-STN, Lines: 7, Source: warp.py


Example 11: compute_rigid_flow

def compute_rigid_flow(depth, pose, intrinsics, reverse_pose=False):
  """Compute the rigid flow from target image plane to source image

  Args:
    depth: depth map of the target image [batch, height_t, width_t]
    pose: target to source (or source to target if reverse_pose=True) 
          camera transformation matrix [batch, 6], in the order of 
          tx, ty, tz, rx, ry, rz; 
    intrinsics: camera intrinsics [batch, 3, 3]
  Returns:
    Rigid flow from target image to source image [batch, height_t, width_t, 2]
  """
  batch, height, width = depth.get_shape().as_list()
  # Convert pose vector to matrix
  pose = pose_vec2mat(pose)
  if reverse_pose:
    pose = tf.matrix_inverse(pose)
  # Construct pixel grid coordinates
  pixel_coords = meshgrid(batch, height, width)
  tgt_pixel_coords = tf.transpose(pixel_coords[:,:2,:,:], [0, 2, 3, 1])
  # Convert pixel coordinates to the camera frame
  cam_coords = pixel2cam(depth, pixel_coords, intrinsics)
  # Construct a 4x4 intrinsic matrix
  filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
  filler = tf.tile(filler, [batch, 1, 1])
  intrinsics = tf.concat([intrinsics, tf.zeros([batch, 3, 1])], axis=2)
  intrinsics = tf.concat([intrinsics, filler], axis=1)
  # Get a 4x4 transformation matrix from 'target' camera frame to 'source'
  # pixel frame.
  proj_tgt_cam_to_src_pixel = tf.matmul(intrinsics, pose)
  src_pixel_coords = cam2pixel(cam_coords, proj_tgt_cam_to_src_pixel)
  rigid_flow = src_pixel_coords - tgt_pixel_coords
  return rigid_flow
Author: yang330624, Project: GeoNet, Lines: 33, Source: utils.py


Example 12: _define_distance_to_clusters

  def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = tf.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = tf.diag(self._covs[c, :])
        inverse = tf.matrix_inverse(cov + self._min_var)
        inv_cov = tf.tile(
            tf.expand_dims(inverse, 0),
            tf.pack([self._num_examples, 1, 1]))
        diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = tf.batch_matmul(diff, inv_cov)
        all_scores.append(tf.sqrt(tf.batch_matmul(
            m_left, tf.transpose(diff, perm=[0, 2, 1])
        )))
      self._all_scores.append(tf.reshape(
          tf.concat(1, all_scores),
          tf.pack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = tf.concat(0, self._all_scores)
    assignments = tf.concat(0, self.assignments())
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat(1, [tf.expand_dims(rows, 1),
                            tf.expand_dims(assignments, 1)])
    self._scores = tf.gather_nd(self._all_scores, indices)
Author: DavidNemeskey, Project: tensorflow, Lines: 33, Source: gmm_ops.py


Example 13: update_centers

    def update_centers(self, img_dataset):
        '''
        Optimize:
            self.C = (U * hu^T + V * hv^T) (hu * hu^T + hv * hv^T)^{-1}
            self.C^T = (hu * hu^T + hv * hv^T)^{-1} (hu * U^T + hv * V^T)
            but every C here needs to be replaced with C^T:
            self.C = (hu * hu^T + hv * hv^T)^{-1} (hu^T * U + hv^T * V)
        '''
        old_C_value = self.sess.run(self.C)

        h = self.img_b_all
        U = self.img_output_all
        smallResidual = tf.constant(
            np.eye(self.subcenter_num * self.subspace_num, dtype=np.float32) * 0.001)
        Uh = tf.matmul(tf.transpose(h), U)
        hh = tf.add(tf.matmul(tf.transpose(h), h), smallResidual)
        compute_centers = tf.matmul(tf.matrix_inverse(hh), Uh)

        update_C = self.C.assign(compute_centers)
        C_value = self.sess.run(update_C, feed_dict={
            self.img_output_all: img_dataset.output,
            self.img_b_all: img_dataset.codes,
        })

        C_sums = np.sum(np.square(C_value), axis=1)
        C_zeros_ids = np.where(C_sums < 1e-8)
        C_value[C_zeros_ids, :] = old_C_value[C_zeros_ids, :]
        self.sess.run(self.C.assign(C_value))
Author: AllenMao, Project: DeepHash, Lines: 28, Source: dqn.py


Example 14: run_training

def run_training(train_X, train_Y):
    X = tf.placeholder(tf.float32, [m, n+1])
    Y = tf.placeholder(tf.float32, [m, 1])

    # add the column of 1s to X
    ones = np.ones([m, 1], dtype=np.float32)
    train_X = np.concatenate((ones, train_X), axis=1)

    # normal equations
    theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(X), X)), tf.transpose(X)), Y)

    with tf.Session() as sess:

        theta = sess.run(theta, feed_dict={X: train_X, Y: train_Y})

        print "Predict.... (Predict a house with 1650 square feet and 3 bedrooms.)"
        # Do not forget to add the column of 1s
        predict_X = np.array([1, 1650, 3], dtype=np.float32).reshape((1, 3))

        predict_Y = tf.matmul(predict_X, theta)

        print "House price(Y) =", sess.run(predict_Y)
        print "theta"
        print theta

        sess.close()
Author: e-lin, Project: tensorflow_practices, Lines: 26, Source: multiple_linear_regression_closedform.py


Example 15: logdet_grad

def logdet_grad(op, grad):
    a = op.inputs[0]
    a_adj_inv = tf.check_numerics(
                    tf.matrix_inverse(a, adjoint=True), 
                    'zero determinant')
    out_shape = tf.concat([tf.shape(a)[:-2], [1, 1]], axis=0)
    return tf.reshape(grad, out_shape) * a_adj_inv
Author: EverettYou, Project: EFL, Lines: 7, Source: EFL.py
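
For reference, the custom gradient in Example 15 follows the matrix-calculus identity for the log-determinant, which is why the inverse is taken with adjoint=True and then scaled by the incoming gradient:

\frac{\partial}{\partial A}\,\log\lvert\det A\rvert = \left(A^{-1}\right)^{\top} = \left(A^{\top}\right)^{-1}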


Example 16: logpdf

    def logpdf(self, x, mean=None, cov=1):
        """Log of the probability density function.

        Parameters
        ----------
        x : tf.Tensor
            A 1-D or 2-D tensor.
        mean : tf.Tensor, optional
            A 1-D tensor. Defaults to zero mean.
        cov : tf.Tensor, optional
            A 1-D or 2-D tensor. Defaults to identity matrix.

        Returns
        -------
        tf.Tensor
            A tensor of one dimension less than the input.
        """
        x = tf.cast(x, dtype=tf.float32)
        x_shape = get_dims(x)
        if len(x_shape) == 1:
            d = x_shape[0]
        else:
            d = x_shape[1]

        if mean is None:
            r = x
        else:
            mean = tf.cast(mean, dtype=tf.float32)
            r = x - mean

        if cov is 1:
            L_inv = tf.diag(tf.ones([d]))
            det_cov = tf.constant(1.0)
        else:
            cov = tf.cast(cov, dtype=tf.float32)
            if len(cov.get_shape()) == 1: # vector
                L_inv = tf.diag(1.0 / tf.sqrt(cov))
                det_cov = tf.reduce_prod(cov)
            else: # matrix
                L = tf.cholesky(cov)
                L_inv = tf.matrix_inverse(L)
                det_cov = tf.pow(tf.reduce_prod(tf.diag_part(L)), 2)

        lps = -0.5*d*tf.log(2*np.pi) - 0.5*tf.log(det_cov)
        if len(x_shape) == 1: # vector
            r = tf.reshape(r, shape=(d, 1))
            inner = tf.matmul(L_inv, r)
            lps -= 0.5 * tf.matmul(inner, inner, transpose_a=True)
            return tf.squeeze(lps)
        else: # matrix
            # TODO vectorize further
            out = []
            for r_vec in tf.unpack(r):
                r_vec = tf.reshape(r_vec, shape=(d, 1))
                inner = tf.matmul(L_inv, r_vec)
                out += [tf.squeeze(lps -
                        0.5 * tf.matmul(inner, inner, transpose_a=True))]

            return tf.pack(out)
Author: TalkingData, Project: edward, Lines: 59, Source: distributions.py


Example 17: predict2

def predict2():
    # predictions
    cov=h.Mul(K_mm_2,tf.matrix_inverse(K_mm_2+K_mnnm_2/tf.square(sigma_2)),K_mm_2)
    cov_chol=tf.cholesky(cov)
    mu=h.Mul(K_mm_2,tf.cholesky_solve(cov_chol,K_mn_2),Ytr)/tf.square(sigma_2)
    mean=h.Mul(K_nm_2,tf.matrix_solve(K_mm_1,mu))
    variance=K_nn_2-h.Mul(K_nm_2,h.safe_chol(K_mm_2,tf.transpose(K_nm_2)))
    var_terms=2*tf.sqrt(tf.reshape(tf.diag_part(variance)+tf.square(sigma_2),[N,1]))
    return mean, var_terms
Author: blutooth, Project: gp, Lines: 9, Source: deepGP.py


Example 18: predict

def predict(K_mn,sigma,K_mm,K_nn):
    # predictions
    K_nm=tf.transpose(K_mn)
    K_mm_I=tf.matrix_inverse(K_mm)
    Sig_Inv=1e-1*np.eye(M)+K_mm+K_mnnm_2/tf.square(sigma)
    mu_post=h.Mul(tf.matrix_solve(Sig_Inv,K_mn),Ytr)/tf.square(sigma)
    mean=h.Mul(K_nm,mu_post)
    variance=K_nn-h.Mul(K_nm,h.safe_chol(K_mm,K_mn))+h.Mul(K_nm,tf.matrix_solve(Sig_Inv,K_mn))
    var_terms=2*tf.sqrt(tf.reshape(tf.diag_part(variance)+tf.square(sigma),[N,1]))
    return mean, var_terms
Author: blutooth, Project: gp, Lines: 10, Source: deepGP.py


Example 19: tf_exponent_gauss

def tf_exponent_gauss(mu1, prec, n):
	'''
	takes in precision, and returns cov
	'''
	prec = tf.mul(n, prec)

	sig = tf.matrix_inverse(prec)
	mu = tf.matmul(prec, mu1)
	mu = tf.mul(n, mu)

	mu = tf.matmul(sig, mu)
	return mu, sig
Author: verajohne, Project: SEP_autoencoder, Lines: 12, Source: util.py


Example 20: get_q

	def get_q(self):
		n = tf.constant(self.n, dtype = tf.float64)
		prec = tf.mul(self.V, n)
		
		prec = tf.add(prec, self.SEP_prior_prec)
		qsig = tf.matrix_inverse(prec)

		mu = tf.matmul(self.V, self.u)
		mu = tf.mul(n, mu)
		qmu = tf.matmul(qsig, mu)

		return qmu, qsig
Author: verajohne, Project: SEP_autoencoder, Lines: 12, Source: Fa.py



Note: The tensorflow.matrix_inverse examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not repost without permission.

