Python nlinalg.matrix_inverse Function Code Examples


This article collects typical usage examples of the theano.tensor.nlinalg.matrix_inverse function in Python. If you have been wondering what matrix_inverse does, how to call it, or where to find working examples, the curated code samples below may help.



A total of 20 code examples of matrix_inverse are shown below, sorted by popularity by default.
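Before the collected examples, here is a minimal, self-contained usage sketch of matrix_inverse compiled with theano.function. The variable names (A, A_inv, invert, x) are illustrative and do not come from any of the examples below.

import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nlinalg as nlinalg

# Symbolic square matrix and its symbolic inverse.
A = T.matrix('A')
A_inv = nlinalg.matrix_inverse(A)

# Compile a callable that evaluates the inverse numerically.
invert = theano.function([A], A_inv)

x = np.array([[4.0, 7.0], [2.0, 6.0]], dtype=theano.config.floatX)
print(invert(x))  # should be close to np.linalg.inv(x)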

Example 1: compute_S

 def compute_S(idx, Sp1, zAA, zBB):
     Sm = ifelse(T.eq(idx, nT-2),
                 T.dot(zBB[iib[-1]], Tla.matrix_inverse(zAA[iia[-1]])),
                 T.dot(zBB[iib[idx]],
                       Tla.matrix_inverse(zAA[iia[T.min([idx+1, nT-2])]]
                                          - T.dot(Sp1, T.transpose(zBB[iib[T.min([idx+1, nT-2])]])))))
     return Sm
Developer: dhern | Project: vilds | Lines: 7 | Source: sym_blk_tridiag_inv.py


Example 2: likelihood

def likelihood(f, l, R, mu, eps, sigma2, lambda_1=1e-4):
    # The similarity matrix W is a linear combination of the slices in R
    W = T.tensordot(R, mu, axes=1)

    # The following indices correspond to labeled and unlabeled examples
    labeled = T.eq(l, 1).nonzero()

    # Calculating the graph Laplacian of W
    D = T.diag(W.sum(axis=0))
    L = D - W

    # The Covariance (or Kernel) matrix is the inverse of the (regularized) Laplacian
    epsI = eps * T.eye(L.shape[0])
    rL = L + epsI
    Sigma = nlinalg.matrix_inverse(rL)

    # The marginal density of labeled examples uses Sigma_LL as covariance (sub-)matrix
    Sigma_LL = Sigma[labeled][:, labeled][:, 0, :]

    # We also consider additive Gaussian noise with variance sigma2
    K_L = Sigma_LL + (sigma2 * T.eye(Sigma_LL.shape[0]))

    # Calculating the inverse and the determinant of K_L
    iK_L = nlinalg.matrix_inverse(K_L)
    dK_L = nlinalg.det(K_L)

    f_L = f[labeled]

    # The (L1-regularized) log-likelihood is given by the summation of the following four terms
    term_A = - (1 / 2) * f_L.dot(iK_L.dot(f_L))
    term_B = - (1 / 2) * T.log(dK_L)
    term_C = - (1 / 2) * T.log(2 * np.pi)
    term_D = - lambda_1 * T.sum(abs(mu))

    return term_A + term_B + term_C + term_D
Developer: pminervini | Project: gaussian-processes | Lines: 35 | Source: propagation.py


Example 3: __init__

    def __init__(self, GenerativeParams, xDim, yDim, srng = None, nrng = None):

        super(LDS, self).__init__(GenerativeParams,xDim,yDim,srng,nrng)

        # parameters
        if 'A' in GenerativeParams:
            self.A      = theano.shared(value=GenerativeParams['A'].astype(theano.config.floatX), name='A'     ,borrow=True)     # dynamics matrix
        else:
            # TBD:MAKE A BETTER WAY OF SAMPLING DEFAULT A
            self.A      = theano.shared(value=.5*np.diag(np.ones(xDim).astype(theano.config.floatX)), name='A'     ,borrow=True)     # dynamics matrix

        if 'QChol' in GenerativeParams:
            self.QChol  = theano.shared(value=GenerativeParams['QChol'].astype(theano.config.floatX), name='QChol' ,borrow=True)     # cholesky of innovation cov matrix
        else:
            self.QChol  = theano.shared(value=(np.eye(xDim)).astype(theano.config.floatX), name='QChol' ,borrow=True)     # cholesky of innovation cov matrix

        if 'Q0Chol' in GenerativeParams:
            self.Q0Chol = theano.shared(value=GenerativeParams['Q0Chol'].astype(theano.config.floatX), name='Q0Chol',borrow=True)     # cholesky of starting distribution cov matrix
        else:
            self.Q0Chol = theano.shared(value=(np.eye(xDim)).astype(theano.config.floatX), name='Q0Chol',borrow=True)     # cholesky of starting distribution cov matrix

        if 'RChol' in GenerativeParams:
            self.RChol  = theano.shared(value=np.ndarray.flatten(GenerativeParams['RChol'].astype(theano.config.floatX)), name='RChol' ,borrow=True)     # cholesky of observation noise cov matrix
        else:
            self.RChol  = theano.shared(value=np.random.randn(yDim).astype(theano.config.floatX)/10, name='RChol' ,borrow=True)     # cholesky of observation noise cov matrix

        if 'x0' in GenerativeParams:
            self.x0     = theano.shared(value=GenerativeParams['x0'].astype(theano.config.floatX), name='x0'    ,borrow=True)     # set to zero for stationary distribution
        else:
            self.x0     = theano.shared(value=np.zeros((xDim,)).astype(theano.config.floatX), name='x0'    ,borrow=True)     # set to zero for stationary distribution

        if 'NN_XtoY_Params' in GenerativeParams:
            self.NN_XtoY = GenerativeParams['NN_XtoY_Params']['network']
        else:
            # Define a neural network that maps the latent state into the output
            gen_nn = lasagne.layers.InputLayer((None, xDim))
            self.NN_XtoY = lasagne.layers.DenseLayer(gen_nn, yDim, nonlinearity=lasagne.nonlinearities.linear, W=lasagne.init.Orthogonal())

        # set to our lovely initial values
        if 'C' in GenerativeParams:
            self.NN_XtoY.W.set_value(GenerativeParams['C'].astype(theano.config.floatX))
        if 'd' in GenerativeParams:
            self.NN_XtoY.b.set_value(GenerativeParams['d'].astype(theano.config.floatX))

        # we assume diagonal covariance (RChol is a vector)
        self.Rinv    = 1./(self.RChol**2) #Tla.matrix_inverse(T.dot(self.RChol ,T.transpose(self.RChol)))
        self.Lambda  = Tla.matrix_inverse(T.dot(self.QChol ,self.QChol.T))
        self.Lambda0 = Tla.matrix_inverse(T.dot(self.Q0Chol,self.Q0Chol.T))

        # Call the neural network output a rate, basically to keep things consistent with the PLDS class
        self.rate = lasagne.layers.get_output(self.NN_XtoY, inputs = self.Xsamp)
Developer: dhern | Project: vilds | Lines: 51 | Source: GenerativeModel.py


Example 4: compute_D

    def compute_D(idx, Dm1, zS, zAA, zBB):
        D = ifelse(T.eq(idx, nT-1),
                   T.dot(Tla.matrix_inverse(zAA[iia[-1]]),
                         III + T.dot(T.transpose(zBB[iib[idx-1]]),
                                     T.dot(Dm1, S[0]))),
                   ifelse(T.eq(idx, 0),
                          Tla.matrix_inverse(zAA[iia[0]]
                                             - T.dot(zBB[iib[0]], T.transpose(S[-1]))),
                          T.dot(Tla.matrix_inverse(zAA[iia[idx]]
                                                   - T.dot(zBB[iib[T.min([idx, nT-2])]],
                                                           T.transpose(S[T.max([-idx-1, -nT+1])]))),
                                III + T.dot(T.transpose(zBB[iib[T.min([idx-1, nT-2])]]),
                                            T.dot(Dm1, S[-idx])))))
        return D
Developer: dhern | Project: vilds | Lines: 16 | Source: sym_blk_tridiag_inv.py


Example 5: get_bivariate_normal_spec

def get_bivariate_normal_spec():
    X1,X2,mu,sigma = [T.scalar('X1'),T.scalar('X2'), T.vector('mu'), T.matrix('sigma')]
    GaussianDensitySpec = FunctionSpec(variables=[X1, X2, mu, sigma],
                                       output_expression = -0.5*T.dot(T.dot((T.concatenate([X1.dimshuffle('x'),X2.dimshuffle('x')])-mu).T,
                                                                            nlinalg.matrix_inverse(sigma)),
                                                                      (T.concatenate([X1.dimshuffle('x'),X2.dimshuffle('x')])-mu)))
    return GaussianDensitySpec
Developer: grahamsdoman | Project: pysterior | Lines: 7 | Source: energy.py


Example 6: __call__

 def __call__(self, A, b, inference=False):
     if inference is True:
         solve = slinalg.Solve()
         x = solve(A, b)
     else:
         x = nlinalg.matrix_inverse(A).dot(b)
     return x
Developer: pminervini | Project: knowledge-propagation | Lines: 7 | Source: linearsystem.py
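A brief aside on the pattern above: forming the explicit inverse and multiplying by b gives the same result as solving the linear system directly, but a dedicated solver is generally cheaper and more numerically stable for a single right-hand side, which is presumably why the example switches on its inference flag. A minimal sketch contrasting the two routes, with illustrative variable names (not from the source):

import theano
import theano.tensor as T
import theano.tensor.nlinalg as nlinalg
import theano.tensor.slinalg as slinalg

A = T.matrix('A')
b = T.vector('b')

# Route 1: explicit inverse followed by a matrix-vector product.
x_via_inverse = nlinalg.matrix_inverse(A).dot(b)

# Route 2: solve the linear system directly (usually preferred numerically).
x_via_solve = slinalg.Solve()(A, b)

f = theano.function([A, b], [x_via_inverse, x_via_solve])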


Example 7: propagate

def propagate(f, l, R, mu, eps):
    # The similarity matrix W is a linear combination of the slices in R
    W = T.tensordot(R, mu, axes=1)

    # The following indices correspond to labeled and unlabeled examples
    labeled = T.eq(l, 1).nonzero()
    unlabeled = T.eq(l, 0).nonzero()

    # Calculating the graph Laplacian of W
    D = T.diag(W.sum(axis=0))
    L = D - W

    # Computing L_UU (the Laplacian over unlabeled examples)
    L_UU = L[unlabeled][:, unlabeled][:, 0, :]

    # Computing the inverse of the (regularized) Laplacian iA = (L_UU + epsI)^-1
    epsI = eps * T.eye(L_UU.shape[0])
    rL_UU = L_UU + epsI
    iA = nlinalg.matrix_inverse(rL_UU)

    # Computing W_UL (the similarity matrix between unlabeled and labeled examples)
    W_UL = W[unlabeled][:, labeled][:, 0, :]
    f_L = f[labeled]

    # f* = (L_UU + epsI)^-1 W_UL f_L
    f_star = iA.dot(W_UL.dot(f_L))

    return f_star
Developer: pminervini | Project: gaussian-processes | Lines: 28 | Source: propagation.py


Example 8: test_gpu_matrix_inverse_inplace_opt

 def test_gpu_matrix_inverse_inplace_opt(self):
     A = theano.tensor.fmatrix("A")
     fn = theano.function([A], matrix_inverse(A), mode=mode_with_gpu)
     assert any([
         node.op.inplace
         for node in fn.maker.fgraph.toposort() if
         isinstance(node.op, GpuMagmaMatrixInverse)
     ])
Developer: zabin10 | Project: Theano | Lines: 8 | Source: test_linalg.py


Example 9: _calc_caylay_delta

 def _calc_caylay_delta(step_size, param, gradient):
     A = Tensor.dot(((step_size / 2) * gradient).T, param) - Tensor.dot(param.T, ((step_size / 2) * gradient))
     I = Tensor.identity_like(A)
     temp = I + A
     # Q = Tensor.dot(batched_inv(temp.dimshuffle('x',0,1))[0], (I - A))
     Q = Tensor.dot(matrix_inverse(temp), I - A)
     update = Tensor.dot(param, Q)
     delta = (step_size / 2) * Tensor.dot((param + update), A)
     return update, delta
Developer: aviveise | Project: double_encoder | Lines: 9 | Source: trainer.py


Example 10: invLogDet

def invLogDet(C):
    # Return inv(A) and log det A where A = C . C^T
    iC = nlinalg.matrix_inverse(C)
    iC.name = "i" + C.name
    iA = T.dot(iC.T, iC)
    iA.name = "i" + C.name[1:]
    logDetA = 2.0 * T.sum(T.log(T.abs_(T.diag(C))))
    logDetA.name = "logDet" + C.name[1:]
    return (iA, logDetA)
Developer: roryjbeard | Project: GP-LVM-Conditional-MF | Lines: 9 | Source: utils.py


Example 11: test_inverse_singular

def test_inverse_singular():
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2, dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    try:
        f(singular)
    except numpy.linalg.LinAlgError:
        return
    assert False
Developer: ZhangAustin | Project: attention-lvcsr | Lines: 9 | Source: test_nlinalg.py


Example 12: logp

    def logp(self, X):
        n = self.n
        p = self.p
        V = self.V

        IVI = det(V)
        IXI = det(X)

        return bound(
            ((n - p - 1) * log(IXI) - trace(matrix_inverse(V).dot(X)) -
                n * p * log(2) - n * log(IVI) - 2 * multigammaln(n / 2., p)) / 2,
             n > (p - 1))
Developer: cosmoharrigan | Project: pymc3 | Lines: 12 | Source: multivariate.py


Example 13: invert_weight_matrix_symb

def invert_weight_matrix_symb(w):
  invw = []
  for i in range(len(w)):
    # layer_weight = w[-(i+1)]
    if i%2 == 1:
      layer_weight = w[-(i+1)]
      print("inv val", -(i+1+1), "of length", len(w))
      invw.append(matrix_inverse(layer_weight))
    else:
      layer_weight = w[-(i+1)]
      print("bias inv val", -(i+1-1), "of length", len(w))
      invw.append(-layer_weight)

  return invw
Developer: zenna | Project: ig | Lines: 14 | Source: hand.py


Example 14: blk_chol_inv

def blk_chol_inv(A, B, b, lower = True, transpose = False):
    '''
    Solve the equation Cx = b for x, where C is assumed to be a
    block-bi-diagonal matrix (i.e., only the first (lower or upper)
    off-diagonal block is nonzero).

    Inputs:
    A - [T x n x n]   tensor, where each A[i,:,:] is the ith block-diagonal matrix
    B - [T-1 x n x n] tensor, where each B[i,:,:] is the ith (upper or lower)
        1st block off-diagonal matrix
    b - right-hand side of Cx = b

    lower (default: True) - boolean specifying whether to treat B as the lower
          or upper 1st block off-diagonal of matrix C
    transpose (default: False) - boolean specifying whether to transpose the
          off-diagonal blocks B[i,:,:] (useful if you want to solve the
          problem C^T x = b with a representation of C)

    Outputs:
    x - solution of Cx = b
    '''
    if transpose:
        A = A.dimshuffle(0, 2, 1)
        B = B.dimshuffle(0, 2, 1)
    if lower:
        x0 = Tla.matrix_inverse(A[0]).dot(b[0])
        def lower_step(Akp1, Bk, bkp1, xk):
            return Tla.matrix_inverse(Akp1).dot(bkp1-Bk.dot(xk))
        X = theano.scan(fn = lower_step, sequences=[A[1:], B, b[1:]], outputs_info=[x0])[0]
        X = T.concatenate([T.shape_padleft(x0), X])
    else:
        xN = Tla.matrix_inverse(A[-1]).dot(b[-1])
        def upper_step(Akm1, Bkm1, bkm1, xk):
            return Tla.matrix_inverse(Akm1).dot(bkm1-(Bkm1).dot(xk))
        X = theano.scan(fn = upper_step, sequences=[A[:-1][::-1], B[::-1], b[:-1][::-1]], outputs_info=[xN])[0]
        X = T.concatenate([T.shape_padleft(xN), X])[::-1]
    return X
Developer: dhern | Project: vilds | Lines: 37 | Source: blk_tridiag_chol_tools.py
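For orientation, here is a hedged usage sketch of blk_chol_inv, assuming the module-level imports the excerpt relies on (theano, theano.tensor as T, theano.tensor.nlinalg as Tla, and ifelse where applicable). The block sizes, values, and the [T x n] layout of b are illustrative assumptions, not taken from the original project.

import numpy as np
import theano
import theano.tensor as T

A_sym = T.tensor3('A')   # [T x n x n] block-diagonal blocks
B_sym = T.tensor3('B')   # [T-1 x n x n] first off-diagonal blocks
b_sym = T.matrix('b')    # assumed [T x n]: one length-n block per row

x_sym = blk_chol_inv(A_sym, B_sym, b_sym, lower=True)
solve_blocks = theano.function([A_sym, B_sym, b_sym], x_sym)

Tn, n = 4, 3
A_val = np.stack([2.0 * np.eye(n) for _ in range(Tn)]).astype(theano.config.floatX)
B_val = np.stack([0.1 * np.eye(n) for _ in range(Tn - 1)]).astype(theano.config.floatX)
b_val = np.ones((Tn, n), dtype=theano.config.floatX)
x_val = solve_blocks(A_val, B_val, b_val)  # stacked solution blocks of Cx = b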


Example 15: logp

    def logp(self, X):
        n = self.n
        p = self.p
        V = self.V

        IVI = det(V)
        IXI = det(X)

        return bound(((n - p - 1) * tt.log(IXI)
                      - trace(matrix_inverse(V).dot(X))
                      - n * p * tt.log(2) - n * tt.log(IVI)
                      - 2 * multigammaln(n / 2., p)) / 2,
                     matrix_pos_def(X),
                     tt.eq(X, X.T),
                     n > (p - 1))
Developer: hvasbath | Project: pymc3 | Lines: 15 | Source: multivariate.py


Example 16: logp

    def logp(self, X):
        n = self.n
        p = self.p
        V = self.V

        IVI = det(V)
        IXI = det(X)

        return bound(
            ((n - p - 1) * log(IXI) - trace(matrix_inverse(V).dot(X)) -
                n * p * log(2) - n * log(IVI) - 2 * multigammaln(n / 2., p)) / 2,
            gt(n, (p - 1)),
            all(gt(eigh(X)[0], 0)),
            eq(X, X.T)
        )
Developer: PaulSorenson | Project: pymc3 | Lines: 15 | Source: multivariate.py


Example 17: cholInvLogDet

def cholInvLogDet(A, dim, jitter, fast=False):

    A_jitter = A + jitter * T.eye(dim)
    cA = myCholesky()(A_jitter)
    cA.name = "c" + A.name

    if fast:
        (iA, logDetA) = invLogDet(cA)
    else:
        iA = nlinalg.matrix_inverse(A_jitter)
        # logDetA = T.log( nlinalg.Det()(A_jitter) )
        logDetA = 2.0 * T.sum(T.log(T.abs_(T.diag(cA))))
        iA.name = "i" + A.name
        logDetA.name = "logDet" + A.name

    return (cA, iA, logDetA)
Developer: roryjbeard | Project: GP-LVM-Conditional-MF | Lines: 16 | Source: utils.py


Example 18: test_inverse_correctness

def test_inverse_correctness():
    rng = numpy.random.RandomState(utt.fetch_seed())

    r = rng.randn(4, 4).astype(theano.config.floatX)

    x = tensor.matrix()
    xi = matrix_inverse(x)

    ri = function([x], xi)(r)
    assert ri.shape == r.shape
    assert ri.dtype == r.dtype

    rir = numpy.dot(ri, r)
    rri = numpy.dot(r, ri)

    assert _allclose(numpy.identity(4), rir), rir
    assert _allclose(numpy.identity(4), rri), rri
Developer: AI-Cdrone | Project: Theano | Lines: 17 | Source: test_nlinalg.py


Example 19: logp

    def logp(self, X):
        nu = self.nu
        p = self.p
        V = self.V

        IVI = det(V)
        IXI = det(X)

        return bound(((nu - p - 1) * tt.log(IXI)
                      - trace(matrix_inverse(V).dot(X))
                      - nu * p * tt.log(2) - nu * tt.log(IVI)
                      - 2 * multigammaln(nu / 2., p)) / 2,
                     matrix_pos_def(X),
                     tt.eq(X, X.T),
                     nu > (p - 1),
                     broadcast_conditions=False
        )
Developer: aasensio | Project: pymc3 | Lines: 17 | Source: multivariate.py


Example 20: logp

    def logp(self, X):
        n = self.n
        p = self.p
        V = self.V

        IVI = det(V)
        IXI = det(X)

        return bound(
            (
                (n - p - 1) * T.log(IXI)
                - trace(matrix_inverse(V).dot(X))
                - n * p * T.log(2)
                - n * T.log(IVI)
                - 2 * multigammaln(n / 2.0, p)
            )
            / 2,
            T.all(eigh(X)[0] > 0),
            T.eq(X, X.T),
            n > (p - 1),
        )
Developer: ingmarschuster | Project: pymc3 | Lines: 21 | Source: multivariate.py



Note: The theano.tensor.nlinalg.matrix_inverse examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding projects' licenses. Please do not repost without permission.

