Python linalg.pinv Function Code Examples


This article collects typical usage examples of the Python function numpy.linalg.pinv. If you are wondering what pinv does, how to call it, or what it looks like in real code, the curated examples below may help.



Twenty code examples of the pinv function are shown below, ordered by popularity.
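Before the collected examples, here is a minimal, self-contained sketch (with made-up numbers) of what numpy.linalg.pinv computes: the Moore-Penrose pseudoinverse, whose product with a response vector gives the least-squares solution of a linear system. This is the pattern most of the examples below rely on.

import numpy as np
from numpy.linalg import pinv

# A small overdetermined system: 4 observations, 2 unknowns (illustrative values)
X = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [1.0, 2.0],
              [1.0, 3.0]])
y = np.array([0.9, 2.1, 2.9, 4.2])

# pinv(X) @ y is the minimum-norm least-squares solution of X @ beta ~= y,
# equivalent here to np.linalg.lstsq(X, y, rcond=None)[0]
beta = pinv(X) @ y
print(beta)          # approximately [0.92, 1.07]
print(y - X @ beta)  # residuals of the fit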

Example 1: t_stat

def t_stat(data, X_matrix):
    """
    Return the estimated betas, t-values, degrees of freedom, and p-values for the glm_multi regression
    
    Parameters
    ----------
    data: numpy array of 4 dimensions
        The image data of one subject, one run
    X_matrix: numpy array
        The design matrix for glm_multi
    Note that the fourth dimension of `data` (time or the number
    of volumes) must be the same as the number of rows of `X_matrix`.
    
    Returns
    -------
    beta: estimated beta values
    
    t: t-values of the betas
    
    df: degrees of freedom
    
    p: p-values corresponding to the t-values and degrees of freedom
    """

    beta = glm_beta(data, X_matrix)

    # Calculate the parameters - b hat
    beta = np.reshape(beta, (-1, beta.shape[-1])).T

    fitted = X_matrix.dot(beta)
    # Residual error
    y = np.reshape(data, (-1, data.shape[-1]))
    errors = y.T - fitted
    # Residual sum of squares
    RSS = (errors**2).sum(axis=0)
 
    df = X_matrix.shape[0] - npl.matrix_rank(X_matrix)
    # Mean residual sum of squares
    MRSS = RSS / df
    # calculate bottom half of t statistic
    Cov_beta = npl.pinv(X_matrix.T.dot(X_matrix))

    SE = np.zeros(beta.shape)
    for i in range(X_matrix.shape[-1]):
        c = np.zeros(X_matrix.shape[-1])
        c[i] = 1
        c = np.atleast_2d(c).T
        SE[i, :] = np.sqrt(MRSS * c.T.dot(Cov_beta.dot(c)))


    zeros = np.where(SE == 0)
    SE[zeros] = 1
    t = beta / SE

    t[zeros] = 0
    # Get p value for t value using CDF of t distribution
    ltp = t_dist.cdf(abs(t), df)
    p = 1 - ltp  # upper tail
    
    return beta.T, t, df, p
Author: ye-zhi, Project: project-epsilon, Lines: 60, Source: t_test.py


Example 2: fit_to_model

def fit_to_model(imchunk,model, mode = 'pinv',fit_pix_mask = None,baseline = None):
    import numpy as np
    #im_array = (imchunk-baseline)#/baseline
    if baseline is not None:
        im_array = imchunk - baseline
    else:
        im_array = imchunk
    imshape = np.shape(im_array[0])
    im_array = im_array.reshape((-1,imshape[0]*imshape[1]))
    if mode == 'nnls':
        fits = np.empty((np.shape(model)[0],np.shape(im_array)[0]))
        for i,im2 in enumerate(im_array):
            im = im2.copy()
            im[~np.isfinite(im)] = 0
            from scipy.optimize import nnls
            if fit_pix_mask is not None:
                fits[:,i] = nnls(model[:,fit_pix_mask].T,im[fit_pix_mask])[0]
            else:
                fits[:,i] = nnls(model.T,im)[0]
    else:
        im = im_array
        print(np.shape(im_array))
        from numpy.linalg import pinv
        if fit_pix_mask is not None:
            fits = np.dot(pinv(model[:,fit_pix_mask]).T,im[:,fit_pix_mask].T)
        else:
            fits = np.dot(pinv(model).T,im)
    return fits
Author: psilentp, Project: planotaxis, Lines: 28, Source: viewer.py


Example 3: td_solver

def td_solver(P, r, X, gm, lm, d=None):
    """Compute the TD solution for an MDP under linear function approximation.

    Args:
      P : The transition matrix under a given policy.
      r : The expected immediate reward for each state under the policy.
      X : The feature matrix (one row for each state)
      gm : The discount parameter, gamma
      lm : The bootstrapping parameter, lambda
      d (optional): The stationary distribution to use.

    Returns:
      theta: the weight vector found by the TD solution.
    """
    ns = len(P) # number of states
    I = np.eye(ns)
    # TODO: Check for validity of P, r, X (size and values)
    # TODO: Provide a way to handle terminal states

    # account for scalar, vector, or matrix parameters
    G = parameter_matrix(gm)
    L = parameter_matrix(lm)

    # compute the stationary distribution if unspecified
    if d is None:
        d = stationary(P)
    # the stationary distribution as a matrix
    D = np.diag(d)

    # Solve the equation
    A = X.T @ D @ pinv(I - P @ G @ L) @ (I - P @ G) @ X
    b = X.T @ D @ pinv(I - P @ G @ L) @ r
    return pinv(A) @ b
Author: rldotai, Project: rlbench, Lines: 33, Source: mdpsolver.py


Example 4: update_b

def update_b(i_index, b, alpha, beta, gamma, sigma2, lambda_D,
             N_g, uni_id, uni_diet, id_g, p, W, X, Z, y):
    i = uni_id[i_index]

    for g_search, i_search in id_g.items():
        if np.any(i_search == i):
            g = g_search

    g_index = np.where(uni_diet == g)[0][0]

    if np.all(gamma[g_index] == 0):  #check if all gamma's are 0
        V2 = lambda_D + np.dot(Z[i].T, Z[i])/sigma2
        mean2 = np.dot(pinv(V2), np.dot(Z[i].T, y[i]-W[i].dot(alpha)))/sigma2
    else:
        V2 = lambda_D + np.dot(Z[i].T, Z[i])/sigma2
        temp1 = XXsum(g_index, uni_diet, id_g, gamma, X)
        temp1 = pinv(temp1)
        V2 = V2 + np.dot(np.dot(np.dot(Z[i].T, X[i][:,gamma[g_index]!=0]), temp1), (np.dot(X[i][:,gamma[g_index]!=0].T, Z[i])))/(sigma2*N_g[g])
        mean2 = np.dot(Z[i].T, y[i] - W[i].dot(alpha) - np.dot(X[i][:,gamma[g_index]!=0], beta[g_index][gamma[g_index]!=0].reshape(np.sum(gamma[g_index]),1)))
        temp2 = np.dot(X[i][:,gamma[g_index]!=0].T, Z[i].dot(b[i_index].reshape(p, 1)))
        for j in id_g[g]:
            j_index = np.where(uni_id == j)[0][0]
            temp2 += np.dot(X[j][:,gamma[g_index]!=0].T, y[j] - W[j].dot(alpha) - Z[j].dot(b[j_index].reshape(p, 1)))
        mean2 = mean2 + np.dot(np.dot(Z[i].T.dot(X[i][:,gamma[g_index]!=0]), temp1), temp2)/N_g[g]
        mean2 = np.dot(pinv(V2), mean2)/sigma2

    #update
    b_new = np.random.multivariate_normal(mean2.reshape(p,), pinv(V2)).reshape(p, )
    return b_new
Author: LeiG, Project: MouseWeights, Lines: 29, Source: posterior.py


Example 5: test_entity_array_variable_get_cond_mean_and_var

 def test_entity_array_variable_get_cond_mean_and_var(self):
   groups = self.groups
   group = groups['u']
   variable = [v for v in groups['u'].iter_variables() if v.entity_id
       == 'v1' ][0]
   related_votes = [
       {'review': 'r1', 'author': 'a1', 'voter': 'v1', 'vote': 4},
       {'review': 'r2', 'author': 'a1', 'voter': 'v1', 'vote': 5},
       {'review': 'r5', 'author': 'a3', 'voter': 'v1', 'vote': 5},
   ]
   v_values = [[v for v in groups['v'].iter_variables() if v.entity_id ==
       vote['review']][0].value for vote in related_votes]
   v_sum = sum([v.dot(v.T) for v in v_values]) / self.var_H.value
   var_matrix = group.var_param.value * identity(const.K)
   inv_var = pinv(var_matrix)
   true_var = pinv(inv_var + v_sum)
   rest_term = sum([variable.get_rest_value(groups, related_votes[i]) * 
       v_values[i] for i in range(len(related_votes))]) / \
       group.var_H.value
   dot_term = inv_var.dot(group.weight_param.value) \
       .dot(variable.features)
   true_mean = true_var.dot(rest_term + dot_term)
   res_mean, res_var = variable.get_cond_mean_and_var(groups, self.votes)
   ntest.assert_allclose(true_var, res_var, rtol=1, atol=1e-7)
   ntest.assert_allclose(true_mean, res_mean, rtol=1, atol=1e-7)
Author: wubin7019088, Project: review_recommendation, Lines: 25, Source: test_cap_models.py


Example 6: t_stat

 def t_stat(self):
     """ betas, t statistic and significance test given data,
      design matrix, contrast
     This is OLS estimation; we assume the errors to have independent
     and identical normal distributions around zero for each $i$ in
     $\e_i$ (i.i.d).
     """
     if self.design is None:
         self.get_design_matrix()
     if self.t_values is None:
         y = self.data.T
         X = self.design
         c = [0, 0, 1]
         c = np.atleast_2d(c).T
         beta = npl.pinv(X).dot(y)
         fitted = X.dot(beta)
         errors = y - fitted
         RSS = (errors**2).sum(axis=0)
         df = X.shape[0] - npl.matrix_rank(X)
         MRSS = RSS / df
         SE = np.sqrt(MRSS * c.T.dot(npl.pinv(X.T.dot(X)).dot(c)))
         try:
             SE[SE == 0] = np.amin(SE[SE != 0])
         except ValueError:
             pass
         t = c.T.dot(beta) / SE
         self.t_values = abs(t[0])
     self.t_indices = np.array(self.t_values).argsort(
     )[::-1][:self.t_values.size]
     return self.t_indices
Author: jodreen, Project: project-lambda, Lines: 30, Source: linear_modeling.py


Example 7: predict_all

 def predict_all(self):
     """Returns the predictions and the reconstructions for all training / test data."""
     outputs, hidden = self.feed_forward(self.pca_transformer.transform(self.X))
     hidden_expected = dot(self._inverse_activation(outputs), pinv(self.W_output))[:, :-1]
     hidden_reconstruction = self.pca_transformer.inverse_transform(
         dot(self._inverse_activation(hidden_expected), pinv(self.W_hidden))[:, :-1])
     return outputs.argmax(axis=1), hidden_reconstruction.reshape(self.app.dataset['images'].shape)
Author: ttsuchi, Project: neural-network-demo, Lines: 7, Source: neural_network.py


Example 8: _em_one_pass

 def _em_one_pass(self, centered=None, numcmpt=1, thresh=1e-16, out=None):
     """
     With numcmpt = 1, computes the first principal component
     of the data. Otherwise computes an unnormalized, non-orthogonal
     spanning set for the first numcmpt principal components. Assumes
     rows are variables, columns are data points.
     """
     csize = (self.ndim, numcmpt)
      if out is not None:
         assert out.shape == csize
         comp = out
         comp[:] = random.normal(size=csize)
     else:
         comp = random.normal(size=csize)
     
     # Initialize 'old' array to infinity
     comp_old = np.empty(csize) + np.inf
     
      if centered is None:
         # Center the data with respect to the dataset mean
         centered = self._data - self._mean
         
     # Compensate for the shape of the data
     if not self._rowvar:
         centered = centered.T
     
     while linalg.norm(comp_old - comp, np.inf) > thresh:
         pinvc_times_data = np.dot(linalg.pinv(comp), centered)
         comp_old[:] = comp
         comp[:] = np.dot(centered, linalg.pinv(pinvc_times_data))
     
     # Normalize the eigenvectors we obtained.
     comp /= np.apply_along_axis(linalg.norm, 0, comp)[np.newaxis, :]
Author: bthirion, Project: yeast-cycle, Lines: 33, Source: pca.py


Example 9: supf

def supf(y, x, p):
    T = y.shape[0]
    window = np.floor(np.array([T * p, T * (1 - p)]))
    window = np.arange(window[0], window[1] + 1, dtype=np.int32)
    # Demean since the intercept doesn't break
    x = x - np.mean(x)
    y = y - np.mean(y)
    b = pinv(x).dot(y)
    e = y - x.dot(b)
    # Compute full sample R2
    R2_r = 1 - e.dot(e) / y.dot(y)
    k = x.shape[1]
    F_stat = np.zeros(T)
    for t in window:
        X1 = x[:t]
        X2 = x[t:]
        # Parameters and errors before the break
        b = pinv(X1).dot(y[:t])
        e[:t] = y[:t] - X1.dot(b)
        # Parameters and errors after the break
        b = pinv(X2).dot(y[t:])
        e[t:] = y[t:] - X2.dot(b)
        # R2 from model with break
        R2_u = 1 - e.dot(e) / y.dot(y)
        # F stat for break at t
        F_stat[t] = ((R2_u - R2_r) / k) / ((1 - R2_u) / (T - 2* k - 1))
    # Only return maximum F stat
    return F_stat.max()
Author: CeasarSS, Project: books, Lines: 28, Source: supf.py


Example 10: test_ridge_regression_py

def test_ridge_regression_py(X, Y, _lambda):

    X = X.astype(np.float32)
    Y = Y.astype(np.float32)
    
    t1 = time.time()
    
    n, p = X.shape

    if n < p:
        tmp = np.dot(X, X.T)
        if _lambda:
            tmp += _lambda*n*np.eye(n)
        tmp = la.pinv(tmp)

        beta_out = np.dot(np.dot(X.T, tmp), Y.reshape(-1, 1))
    else:
        tmp = np.dot(X.T, X)
        if _lambda:
            tmp += _lambda*n*np.eye(p)
        tmp = la.pinv(tmp)

        beta_out = np.dot(tmp, np.dot(X.T, Y.reshape(-1, 1)))
    
    t2 = time.time()
    dt = t2-t1
    
    print("total time (Python): {}".format(dt))
    print(beta_out[:20,0])
Author: slipguru, Project: l1l2py, Lines: 29, Source: test_ridge_regression.py


Example 11: ls

def ls(R,W,d):
  (n,m) = R.shape
  sigma = 0.0001
  Id = np.identity(d)
  U0 = np.zeros((d,n))
  
  V = np.random.rand(d, m)
  for i in range(1000):
      U = U0
      for g in range(n):
          VV = np.zeros(d)
          for w in W[g]:
             VV = VV+np.dot(V[:,w],V[:,w].T)
          X = nlin.pinv(sigma*Id+VV)
          #X = sigma*Id + VV
          for v in W[g]:
             U[:,g] = U[:,g] + R[g,v]*np.dot(V[:,v].T,X)               
             #U[:,g] = U[:,g] + R[g,v]*slin.solve(X ,V[:,v].T)
  
      Y = np.dot(U,U.T)
      Y = nlin.pinv(sigma*Id+Y)
      Y = np.dot(U.T,Y)
      #Y = np.linalg.solve( U.T, sigma*Id+Y)
      #Y = np.linalg.lstsq(U.T, sigma*Id+Y)
      for v in range(m):
         V[:,v] = np.dot(R[:,v].T,Y)

  return (U,V)
Author: meehande, Project: MatrixFactorizationRecommender, Lines: 28, Source: test.py


Example 12: AND

def AND(C, B):
	
	dim, col = C.shape
	tolerance = 1e-14

	UC, SC, UtC = svd(C)
	UB, SB, UtB = svd(B)

	diag_SC = diag(SC)
	diag_SB = diag(SB)

	# sum up how many elements on diagonal 
	# are bigger than tolerance
	numRankC =  (1.0 * (diag_SC > tolerance)).sum()
	numRankB =  (1.0 * (diag_SB > tolerance)).sum()

	UC0 = matrix(UC[:, numRankC:])
	UB0 = matrix(UB[:, numRankB:])
	W, Sigma, Wt = svd(UC0 * UC0.transpose() + UB0 * UB0.transpose())
	numRankSigma =  (1.0 * (diag(Sigma) > tolerance)).sum()
	Wgk = matrix(W[:, numRankSigma:])
	I = matrix(identity(dim))
	CandB = \
	  Wgk * inv(Wgk.transpose() *  \
	  ( pinv(C, tolerance) + pinv(B, tolerance) - \
	    I) * Wgk) *Wgk.transpose()
	return CandB
Author: trondarild, Project: ikaros, Lines: 27, Source: conceptor.py


Example 13: sample_posterior_gibbs

	def sample_posterior_gibbs(self, X, num_steps=10, Y=None, Z=None):
		"""
		B{References:}
			- Doucet, A. (2010). I{A Note on Efficient Conditional Simulation of
			Gaussian Distributions.}
		"""

		# filter matrix and filter responses
		W = pinv(self.A)
		WX = dot(W, X)

		# nullspace projection matrix
		Q = eye(self.num_hiddens) - dot(W, self.A)

		# initial hidden state
		if Z is None:
			Y = WX + dot(Q, Y) if Y is not None else \
				WX + dot(Q, self.sample_prior(X.shape[1]))
		else:
			V = pinv(self.nullspace_basis())
			Y = WX + dot(V, Z)

		# Gibbs sample between S and Y given X
		for step in range(num_steps):
			# update scales
			S = self.sample_scales(Y)

			# update hidden states
			Y = self._sample_posterior_cond(Y, X, S, W, WX, Q)

			if Distribution.VERBOSITY > 1:
				print('{0:6}\t{1:10.2f}'.format(step + 1, mean(self.prior_energy(Y))))

		return asarray(Y)
Author: lucastheis, Project: isa, Lines: 34, Source: isa.py


Example 14: update

 def update(self):
     '''Initialize other arrays from fundamental arrays'''
     #The sparse matrices are treated a little differently because they are not rectangular
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore",category=DeprecationWarning)
         if self.AtAi.size == 0:
             self.AtAi = la.pinv(self.At.dot(self.At.T).todense(),rcond=1e-6).astype(np.float32)#(AtA)^-1
             self.BtBi = la.pinv(self.Bt.dot(self.Bt.T).todense(),rcond=1e-6).astype(np.float32)#(BtB)^-1
Author: domagalski, Project: omnical, Lines: 8, Source: info.py


Example 15: noise

def noise(L, jump_op, ss, pops, Q, freq):
    noise = np.zeros(freq.size, dtype=complex128)
    for i in range(len(freq)):
        R_plus = np.dot(Q, np.dot(npla.pinv(1.j*freq[i]*np.eye(L.shape[0])-L), Q))
        R_minus = np.dot(Q, np.dot(npla.pinv(-1.j*freq[i]*np.eye(L.shape[0])-L), Q))
        noise[i] = np.dot(pops, np.dot(jump_op, ss)) \
                        + np.dot(pops, np.dot(np.dot(np.dot(jump_op, R_plus), jump_op) \
                                                   + np.dot(np.dot(jump_op, R_minus), jump_op), ss))
    return noise
Author: rstones, Project: counting_statistics, Lines: 9, Source: optimized_funcs.py


Example 16: solve

 def solve(self, eps=1e-8):
     '''
     Implement a LCQP solver, with numerical threshold eps.
     '''
     Cp = npl.pinv(self.C, eps)
     xopt = Cp * self.d
     P = eye(self.nx * self.N) - Cp * self.C
     xopt += npl.pinv(self.A * P, eps) * (self.b - self.A * xopt)
     return xopt
Author: nim65s, Project: pinocchio, Lines: 9, Source: factor.py


Example 17: nipals_xy

def nipals_xy(X, Y, mode="PLS", max_iter=500, tol=1e-06):
    """
    NIPALS algorithm; returns the first left and right singular
    vectors of X'Y.

    :param X, Y: data matrix
    :type X, Y: :class:`numpy.array`

    :param mode: possible values "PLS" (default) or "CCA" 
    :type mode: string

    :param max_iter: maximal number of iterations (default: 500)
    :type max_iter: int

    :param tol: tolerance parameter; if norm of difference
        between two successive left singular vectors is less than tol,
        iteration is stopped
    :type tol: a non-negative float
            
    """
    yScore, uOld, ite = Y[:, [0]], 0, 1
    Xpinv = Ypinv = None
    # Inner loop of the Wold algo.
    while ite < max_iter:
        # Update u: the X weights
        if mode == "CCA":
            if Xpinv is None:
                Xpinv = linalg.pinv(X) # compute once pinv(X)
            u = dot(Xpinv, yScore)
        else:
            # Mode PLS: regress each X column on yScore
            u = dot(X.T, yScore) / dot(yScore.T, yScore)
        # Normalize u
        u /= numpy.sqrt(dot(u.T, u))
        # Update xScore: the X latent scores
        xScore = dot(X, u)

        # Update v: the Y weights
        if mode == "CCA":
            if Ypinv is None:
                Ypinv = linalg.pinv(Y) # compute once pinv(Y)
            v = dot(Ypinv, xScore)
        else:
            # Mode PLS regress each X column on yScore
            v = dot(Y.T, xScore) / dot(xScore.T, xScore)
        # Normalize v
        v /= numpy.sqrt(dot(v.T, v))
        # Update yScore: the Y latent scores
        yScore = dot(Y, v)

        uDiff = u - uOld
        if dot(uDiff.T, uDiff) < tol or Y.shape[1] == 1:
            break
        uOld = u
        ite += 1
    return u, v
Author: pauloortins, Project: Computer-Vision-Classes---UFBA, Lines: 56, Source: pls.py


Example 18: predict

def predict(x):
	maximum = "null"
	for param in params: # iterate through all distributions
		(m, u, v, name) = param
		mtx = pinv(v)*(np.transpose(np.subtract(x, m)))*(pinv(u))*(np.subtract(x, m))
		trace = np.trace(mtx) #butterfly has a small trace
		l = 1.0e300*np.exp(-0.5*trace)/((la.norm(v)**(n/2.0))*(la.norm(u)**(p/2.0))) # likelihood, excluding the "2pi" term and multiplying by a large positive number (we get overflow otherwise)
		if maximum == "null":
			maximum = (l, name)
		elif l > maximum[0]:
			maximum = (l, name)
			print(maximum)
	return maximum
Author: ericwu09, Project: ECT-classification, Lines: 13, Source: test.py


Example 19: sample_posterior_ais

	def sample_posterior_ais(self, X, num_steps=10, annealing_weights=[]):
		"""
		Sample posterior distribution over hidden states using annealed importance
		sampling with Gibbs sampling transition operator.
		"""

		if not annealing_weights:
			annealing_weights = linspace(0, 1, num_steps + 1)[1:]

		# initialize proposal distribution to be Gaussian
		model = deepcopy(self)
		for gsm in model.subspaces:
			gsm.scales[:] = 1.

		# filter matrix and filter responses
		W = pinv(self.A)
		WX = dot(W, X)

		# nullspace basis and projection matrix
		B = self.nullspace_basis()
		Q = dot(B.T, B)

		# initialize proposal samples (Z is initially Gaussian and independent of X)
		Z = dot(B, randn(self.num_hiddens, X.shape[1]))
		Y = WX + dot(pinv(B), Z)

		# initialize importance weights (log-determinant of dot(B.T, B) not needed here)
		log_is_weights = sum(multiply(Z, dot(inv(dot(B, B.T)), Z)), 0) / 2. \
			+ (self.num_hiddens - self.num_visibles) / 2. * log(2. * pi)
		log_is_weights.resize(1, X.shape[1])

		for step, beta in enumerate(annealing_weights):
			# tune proposal distribution
			for i in range(len(self.subspaces)):
				# adjust standard deviations
				model.subspaces[i].scales = (1. - beta) + beta * self.subspaces[i].scales

			log_is_weights -= model.prior_energy(Y)

			# apply Gibbs sampling transition operator
			S = model.sample_scales(Y)
			Y = model._sample_posterior_cond(Y, X, S, W, WX, Q)

			log_is_weights += model.prior_energy(Y)

			if Distribution.VERBOSITY > 1:
				print('{0:6}\t{1:10.2f}'.format(step + 1, mean(self.prior_energy(Y))))

		log_is_weights += self.prior_loglikelihood(Y) + slogdet(dot(W.T, W))[1] / 2.

		return Y, log_is_weights
Author: lucastheis, Project: isa, Lines: 51, Source: isa.py


Example 20: _cov_

 def _cov_(self, data, params, priors):
     '''Calculate covariance matrix of the normal posterior dist.'''
     V1 = np.zeros([data.p, data.p])
     for gdx in range(data.grp):
         g = data.unidiets[gdx]
         nzro_gamma = (params.gamma[gdx,:]!=0)
         for i in data.grp_uniids[g]:
             V1 += np.dot(data.id_W[i].T, data.id_W[i])
         if nzro_gamma.any():
             V1 += np.dot(self.__nxTw__[g].T,
                          np.dot(pinv(self.__nxTx__[g]),
                                 self.__nxTw__[g]))
     V1 = V1/params.sigma2 + priors.d4
     self.cov = pinv(V1)
Author: LeiG, Project: MouseWeights, Lines: 14, Source: postdist.py



Note: The numpy.linalg.pinv examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.

