
Python linalg.det Function Code Examples


This article collects typical usage examples of the scipy.linalg.det function in Python. If you are wondering how the det function works, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.



Twenty code examples of the det function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
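Before looking at the collected examples, here is a minimal standalone sketch of calling scipy.linalg.det directly; the matrix values are made up purely for illustration:

import numpy as np
from scipy import linalg

# The determinant of a 2x2 matrix [[a, b], [c, d]] is a*d - b*c.
A = np.array([[3.0, 1.0],
              [2.0, 4.0]])
print(linalg.det(A))  # 3*4 - 1*2 = 10.0

Several of the examples below take np.log of a determinant; for large matrices the determinant itself can overflow or underflow, so working with a log-determinant is often the numerically safer choice.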

Example 1: lnmixgaussian

def lnmixgaussian(x, params):
    """
    NAME:
       lnmixgaussian
    PURPOSE:
       returns the log of a mixture of two two-dimensional gaussian
    INPUT:
       x - 2D point to evaluate the Gaussian at
       params - mean and variances ([mean_array,inverse variance matrix, mean_array, inverse variance, amp1])
    OUTPUT:
       log N(mean,var)
    HISTORY:
       2009-10-30 - Written - Bovy (NYU)
    """
    return sc.log(
        params[4]
        / 2.0
        / sc.pi
        * sc.sqrt(linalg.det(params[1]))
        * sc.exp(-0.5 * sc.dot(x - params[0], sc.dot(params[1], x - params[0])))
        + (1.0 - params[4])
        / 2.0
        / sc.pi
        * sc.sqrt(linalg.det(params[3]))
        * sc.exp(-0.5 * sc.dot(x - params[2], sc.dot(params[3], x - params[2])))
    )
Developer: ritabanc | Project: bovy_mcmc | Lines: 26 | Source: test_bovy_mv_mcmc.py


Example 2: LL

   def LL(self, h, X=None, stack=True, REML=False):
      """
      Computes the log-likelihood for a given heritability (h). If X==None, then the
      default X0t will be used. If X is set and stack=True, then X0t will be matrix concatenated with
      the input X. If stack is false, then X is used in place of X0t in the LL calculation.
      REML is computed by adding additional terms to the standard LL and can be computed by setting REML=True.
      """

      if X is None:
         X = self.X0t
      elif stack:
         self.X0t_stack[:, (self.q)] = matrixMult(self.Kve.T, X)[:, 0]
         X = self.X0t_stack

      n = float(self.N)
      q = float(X.shape[1])
      beta, sigma, Q, XX_i, XX = self.getMLSoln(h, X)
      LL = n*np.log(2*np.pi) + np.log(h*self.Kva + (1.0-h)).sum() + n + n*np.log(1.0/n * Q)
      LL = -0.5 * LL

      if REML:
         LL_REML_part = q*np.log(2.0*np.pi*sigma) + np.log(det(matrixMult(X.T, X))) - np.log(det(XX))
         LL = LL + 0.5*LL_REML_part

      LL = LL.sum()
      return LL, beta, sigma, XX_i
Developer: genenetwork | Project: pylmm | Lines: 27 | Source: lmm2.py


Example 3: __init__

	def __init__(self,n, dimz = 2, dimx = 3):
		
		self.n = n
		self.W = RS.normal(0,1, size = (dimx,dimz))
		self.sigx = 0.000000000001#RS.normal(0,1)
		self.dimz = dimz
		self.dimx = dimx
		
		data = util.generate_data(n, self.W, self.sigx, dimx, dimz)
		self.observed = data[0]
		self.latent = data[1]
		
		self.prec = (1/self.sigx)*np.dot(self.W.transpose(), self.W)
		self.cov = np.linalg.inv(self.prec)
		
		'''
		values for normalisation computation-- messy!
		'''
		temp1 = (2*np.pi)**(dimz/2.0)*np.sqrt(det(self.cov))
		temp2 = det(2*np.pi*self.sigx*np.identity(dimz))
		self.pc_norm1 = temp1/temp2
		temp3 = np.linalg.inv(np.dot(self.W.transpose(), self.W))
		self.wtwinv = temp3
		temp3 = np.dot(self.W, temp3)
		self.pc_norm2 = np.dot(temp3, self.W.transpose())
Developer: ml-lab | Project: Autoencoding_SEP | Lines: 25 | Source: FAnalysis.py


Example 4: find_best_rotation

def find_best_rotation(q1, q2):
    """
    This function calculates the best rotation between two srvfs using
    Procrustes rigid alignment

    :param q1: numpy ndarray of shape (2,M) of M samples
    :param q2: numpy ndarray of shape (2,M) of M samples

    :rtype: numpy ndarray
    :return q2new: optimal rotated q2 to q1
    :return R: rotation matrix

    """
    eps = finfo(double).eps
    n, T = q1.shape
    A = q1.dot(q2.T)
    U, s, V = svd(A)

    if (abs(det(U) * det(V) - 1) < 10 * eps):
        S = eye(n)
    else:
        S = eye(n)
        S[:, -1] = -S[:, -1]

    R = U.dot(S).dot(V.T)
    q2new = R.dot(q2)

    return (q2new, R)
Developer: jdtuck | Project: fdasrsf_python | Lines: 28 | Source: curve_functions.py


Example 5: __init__

	def __init__(self,n, dimz = 2, dimx = 3):
		
		self.n = n
		self.W = RS.normal(0,1, size = (dimx,dimz))
		self.sigx = 0.000000000001#RS.normal(0,1)
		self.dimz = dimz
		self.dimx = dimx
		
		data = self.generate_data(n)
		self.observed = data[0]
		#we keep this to test
		self.latent = data[1]
		
		self.prec = (1/self.sigx)*np.dot(self.W.transpose(), self.W)
		self.cov = np.linalg.inv(self.prec)
		
		'''
		values for normalisation computation
		'''
		temp1 = (2*np.pi)**(dimz/2.0)*np.sqrt(det(self.cov))
		temp2 = det(2*np.pi*self.sigx*np.identity(dimz))
		self.pc_norm1 = temp1/temp2
		temp3 = np.linalg.inv(np.dot(self.W.transpose(), self.W))
		self.wtwinv = temp3
		temp3 = np.dot(self.W, temp3)
		self.pc_norm2 = np.dot(temp3, self.W.transpose())
		
		'''
		prior for n factors will be product of n priors~ N(0,I)
		'''
		#I = np.identity(dimz)
		#product_priors = MVN(0, n*I)
Developer: verajohne | Project: SEP_autoencoder | Lines: 33 | Source: FAnalysis.py


Example 6: __init__

    def __init__(self, pos, neg, k=5):
        self.k = k
        pos = pos.T
        neg = neg.T

        totals = pos.sum(axis=1)
        popular = numpy.argsort(totals, axis=0)[::-1, :][1 : 1 + k, :]
        popular = numpy.array(popular.T)[0]
        self.popular = popular

        pos = pos[popular, :].todense()
        neg = neg[popular, :].todense()

        self.posmu = pos.mean(axis=1)
        self.negmu = neg.mean(axis=1)

        p = pos - self.posmu
        n = neg - self.negmu

        self.pcov = p * p.T / p.shape[1]
        self.ncov = n * n.T / n.shape[1]

        self.pdet = dlinalg.det(self.pcov)
        self.ndet = dlinalg.det(self.ncov)

        assert self.pdet != 0
        assert self.ndet != 0

        self.picov = dlinalg.inv(self.pcov)
        self.nicov = dlinalg.inv(self.ncov)
Developer: BLKStone | Project: pacumen | Lines: 30 | Source: multinormal.py


Example 7: matrix_normal_density

def matrix_normal_density(X, M, U, V):
    """Sample from a matrix normal distribution"""
    norm = - 0.5*np.log(la.det(2*np.pi*U)) - 0.5*np.log(la.det(2*np.pi*V))
    XM = X-M
    pptn = -0.5*np.trace( np.dot(la.solve(U,XM),la.solve(V,XM.T)) )
    pdf = norm + pptn
    return pdf
Developer: drpeteb | Project: linear-system-toolkit | Lines: 7 | Source: sampling.py


Example 8: from_matrix44

 def from_matrix44(self, aff):
     """
     Convert a 4x4 matrix describing an affine transform into a
     12-sized vector of natural affine parameters: translation,
     rotation, log-scale, pre-rotation (to allow for shearing when
     combined with non-unitary scales). In case the transform has a
     negative determinant, set the `_direct` attribute to False.
     """
     vec12 = np.zeros((12,))
     vec12[0:3] = aff[:3, 3]
     # Use SVD to find orthogonal and diagonal matrices such that
     # aff[0:3,0:3] == R*S*Q
     R, s, Q = spl.svd(aff[0:3, 0:3]) 
     if spl.det(R) < 0:
         R = -R
         Q = -Q
     r = rotation_mat2vec(R)
     if spl.det(Q) < 0:
         Q = -Q
         self._direct = False
     q = rotation_mat2vec(Q)
     vec12[3:6] = r
     vec12[6:9] = np.log(np.maximum(s, TINY))
     vec12[9:12] = q
     self._vec12 = vec12
Developer: rfdougherty | Project: nipy | Lines: 25 | Source: affine.py


Example 9: testQuaternion

def testQuaternion():
  dtheta = 30.0
  quat = Quaternion()
  print quat.q
  print quat.toRot()
  print det(quat.toRot())

  figm = mlab.figure(bgcolor=(1,1,1))
  for i in range(100):
    print quat.sampleUnif(0.5*np.pi)
    k,theta = quat.toAxisAngle()
    print theta*180.0/np.pi
    plotCosy(figm, quat.toRot())

  figm = mlab.figure(bgcolor=(1,1,1))
  for i in range(100):
    print quat.sample(dtheta)
    k,theta = quat.toAxisAngle()
    print theta*180.0/np.pi
    plotCosy(figm, quat.toRot())

  figm1 = mlab.figure(bgcolor=(1,1,0.0))
  for i in range(100):
    # sample rotation axis
    k = np.random.rand(3)-0.5
    # sample uiniformly from +- 5 degrees
    theta =  (np.asscalar(np.random.rand(1)) + dtheta - 0.5) *np.pi/180.0 # (np.asscalar(np.random.rand(1))-0.5)*np.pi/(180.0/(2.0*dtheta))
    print 'perturbation: {} theta={}'.format(k/norm(k),theta*180.0/np.pi)
    dR = RodriguesRotation(k/norm(k),theta)
    plotCosy(figm1, dR)
    
  mlab.show()
Developer: jstraub | Project: js | Lines: 32 | Source: sphere.py


Example 10: _update_precisions

    def _update_precisions(self):
        """Update the variational distributions for the precisions"""
        if self.cvtype == 'spherical':
            self._a = 0.5 * self.n_features * np.sum(self._z, axis=0)
            for k in xrange(self.n_components):
                # XXX: how to avoid this huge temporary matrix in memory
                dif = (self._X - self._means[k])
                self._b[k] = 1.
                d = np.sum(dif * dif, axis=1)
                self._b[k] += 0.5 * np.sum(
                    self._z.T[k] * (d + self.n_features))
                self._bound_prec[k] = (
                    0.5 * self.n_features * (
                        digamma(self._a[k]) - np.log(self._b[k])))
            self._precs = self._a / self._b

        elif self.cvtype == 'diag':
            for k in xrange(self.n_components):
                self._a[k].fill(1. + 0.5 * np.sum(self._z.T[k], axis=0))
                ddif = (self._X - self._means[k])  # see comment above
                for d in xrange(self.n_features):
                    self._b[k, d] = 1.
                    dd = ddif.T[d] * ddif.T[d]
                    self._b[k, d] += 0.5 * np.sum(self._z.T[k] * (dd + 1))
                self._precs[k] = self._a[k] / self._b[k]
                self._bound_prec[k] = 0.5 * np.sum(digamma(self._a[k])
                                                    - np.log(self._b[k]))
                self._bound_prec[k] -= 0.5 * np.sum(self._precs[k])

        elif self.cvtype == 'tied':
            self._a = 2 + self._X.shape[0] + self.n_features
            self._B = (self._X.shape[0] + 1) * np.identity(self.n_features)
            for i in xrange(self._X.shape[0]):
                for k in xrange(self.n_components):
                    dif = self._X[i] - self._means[k]
                    self._B += self._z[i, k] * np.dot(dif.reshape((-1, 1)),
                                                      dif.reshape((1, -1)))
            self._B = linalg.pinv(self._B)
            self._precs = self._a * self._B
            self._detB = linalg.det(self._B)
            self._bound_prec = 0.5 * detlog_wishart(
                self._a, self._B, self._detB, self.n_features)
            self._bound_prec -= 0.5 * self._a * np.trace(self._B)

        elif self.cvtype == 'full':
            for k in xrange(self.n_components):
                T = np.sum(self._z.T[k])
                self._a[k] = 2 + T + self.n_features
                self._B[k] = (T + 1) * np.identity(self.n_features)
                dx = self._X - self._means[k]
                self._B[k] += np.dot((self._z[:, k] * dx.T), dx)
                self._B[k] = linalg.inv(self._B[k])
                self._precs[k] = self._a[k] * self._B[k]
                self._detB[k] = linalg.det(self._B[k])
                self._bound_prec[k] = 0.5 * detlog_wishart(self._a[k],
                                                           self._B[k],
                                                           self._detB[k],
                                                           self.n_features)
                self._bound_prec[k] -= 0.5 * self._a[k] * np.trace(self._B[k])
Developer: Scott-Alex | Project: scikit-learn | Lines: 59 | Source: dpgmm.py


Example 11: KL_normal

def KL_normal(m1, sigma1, m2, sigma2):
    """
    Calculates the KL divergence between two normal distributions specified by
    N(``mu1``, ``sigma1``), N(``mu2``, ``sigma2``)
    """

    return 1. / 2. * (math.log(det(sigma2) / det(sigma1)) - len(m1) + trace(mdot(inv(sigma2), sigma1)) + \
    mdot((m2 - m1).T, inv(sigma2) , m2- m1))
Developer: Karl-Krauth | Project: Sparse-GP | Lines: 8 | Source: util.py
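For context, the closed-form identity this helper evaluates (the standard KL divergence between two d-dimensional normal distributions, stated here only for reference) is

\mathrm{KL}\big(\mathcal{N}(m_1,\Sigma_1)\,\|\,\mathcal{N}(m_2,\Sigma_2)\big) = \tfrac{1}{2}\Big(\ln\tfrac{\det\Sigma_2}{\det\Sigma_1} - d + \mathrm{tr}(\Sigma_2^{-1}\Sigma_1) + (m_2-m_1)^{\top}\Sigma_2^{-1}(m_2-m_1)\Big),

which matches the code above term by term, with d = len(m1).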


Example 12: Matusita_kernel

def Matusita_kernel(cov_1, cov_2):
    
    p = np.shape(cov_1)[0]    
    
    det_1 = la.det(cov_1)
    det_2 = la.det(cov_2)
    det_sum = la.det(cov_1 + cov_2)
    return ((2 ** (p/2.0)) * (det_1 ** 0.25) * (det_2 ** 0.25))/(det_sum ** 0.5)
Developer: jmyoung36 | Project: basic_connectivity | Lines: 8 | Source: run_classification_Mausita.py


Example 13: _ar_model_select

def _ar_model_select(R, m, ne, p_range):
    """model order selection

    :Parameters:
        R : ndarray
            upper triangular mx from QR
        m : int
            state vector dimension
        ne : int
            number of block equations of size m used in the estimation
        p_range : list
            list of model order to select from
    """

    # inits
    p_max = max(p_range)
    p_len = len(p_range)

    sbc = N.zeros(p_len)
    fpe = N.zeros(p_len)
    ldp = N.zeros(p_len)

    np = N.zeros(p_len)
    np[-1] = m * p_max

    # get lower right triangle of R
    #
    #     | R11  R12 |
    # R = |          |
    #     |  0   R22 |
    #
    R22 = R[np[-1]:np[-1] + m, :][:, np[-1]:np[-1] + m]
    invR22 = NL.inv(R22)
    Mp = N.dot(invR22, invR22.T)

    # model selection
    ldp[-1] = 2.0 * N.log(NL.det(R22))
    for i in reversed(xrange(p_len)):
        np[i] = m * p_range[i]
        if p_range[i] < p_max:

            # downdated part of R
            Rp = R[np[i]:np[i] + m, :][:, np[-1]:np[-1] + m]

            # Woodbury formula
            L = NL.cholesky(N.eye(m) + N.dot(N.dot(Rp, Mp), Rp.T), lower=True)
            Np = N.dot(N.dot(NL.inv(L), Rp), Mp)
            Mp = Mp - N.dot(Np.T, Np)

            ldp[i] = ldp[i + 1] + 2.0 * N.log(NL.det(L))

        # selector metrics
        sbc[i] = ldp[i] / m - N.log(ne) * (ne - np[i]) / ne
        fpe[i] = ldp[i] / m - N.log(ne * (ne - np[i]) / (ne + np[i]))

    # return
    return sbc, fpe, ldp, np
Developer: mtambos | Project: Neural-Simulation | Lines: 57 | Source: ar_model.py


Example 14: main1

def main1():
    N = 4       # Number of time slices
    n = 1
    r = 1
    p = 1
    m = 1
    s = 2

    q = 1      # Number of design points

    solver = IMFSolver(n=n, r=r, p=p, m=m, s=s, N=N)

    theta = [1.0, 1.0]

    solver.set_Phi([[theta[0]]])
    solver.set_diff_Phi_theta([[1.0]], 0)
    solver.set_diff_Phi_theta([[0.0]], 1)

    solver.set_Psi([[theta[1]]])
    solver.set_diff_Psi_theta([[0.0]], 0)
    solver.set_diff_Psi_theta([[1.0]], 1)

    solver.set_Gamma([[1.0]])
    solver.set_diff_Gamma_theta([[0.0]], 0)
    solver.set_diff_Gamma_theta([[0.0]], 1)

    solver.set_H([[1.0]])
    solver.set_diff_H_theta([[0.0]], 0)
    solver.set_diff_H_theta([[0.0]], 1)

    solver.set_Q([[0.1]])
    solver.set_diff_Q_theta([[0.0]], 0)
    solver.set_diff_Q_theta([[0.0]], 1)

    solver.set_R([[0.3]])
    solver.set_diff_R_theta([[0.0]], 0)
    solver.set_diff_R_theta([[0.0]], 1)

    solver.set_x0([[0.0]])
    solver.set_diff_x0_theta([[0.0]], 0)
    solver.set_diff_x0_theta([[0.0]], 1)

    solver.set_P0([[0.1]])
    solver.set_diff_P0_theta([[0.0]], 0)
    solver.set_diff_P0_theta([[0.0]], 1)

    solver.set_u([[1.0]], 0)
    solver.set_u([[1.0]], 1)
    solver.set_u([[1.0]], 2)
    solver.set_u([[2.0]], 3)

    M = solver.get_inf_matrix()
    print "M"
    print M
    print "la.det(M) = ", la.det(M)
    print "-np.log(la.det(M)) = ", -np.log(la.det(M))
Developer: aamihailov | Project: metplan-labs | Lines: 56 | Source: IMF.py


Example 15: give_me_an_array

def give_me_an_array(A, B, C):
    output = np.zeros((2,2))
### START YOUR CODE HERE ###
    output[0,0] = linalg.det(A+B.dot(linalg.inv(C)))
    output[0,1] = linalg.det(B-C.dot(linalg.inv(A)))
    output[1,0] = linalg.det(B-A.dot(linalg.inv(C)))
    output[1,1] = linalg.det(A+C.dot(linalg.inv(B)))

#### END YOUR CODE HERE ####
    return output
Developer: PinHeWang | Project: NTU_NumericalAnalysisAndProgramming | Lines: 10 | Source: 7_5.py


Example 16: _update_precisions

    def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in xrange(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in xrange(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                    - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in xrange(self.n_components):
                    diff = X - self.means_[k]
                    self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = linalg.pinv(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in xrange(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = linalg.pinv(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(self.dof_[k],
                                                            self.scale_[k],
                                                            self.det_scale_[k],
                                                           n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])
开发者ID:WeatherGod,项目名称:scikit-learn,代码行数:55,代码来源:dpgmm.py


Example 17: INsc1

def INsc1(A):
    X = np.copy(A)
    E = 0.5*(np.eye(A.shape[0])-A)
    dA = spl.det(A)**(0.5)
    for i in range (1,5):
        print i
        detX =  spl.det(X)
        uk = np.abs(detX/dA)**(-1.0/i)
        Etk = (E + (0.5*X))/uk - (0.5)*uk*X
        X = uk*X + Etk
        print X
        E = (-0.5)*(Etk.dot(spl.inv(X))).dot(Etk)
    return X
Developer: sn1p3r46 | Project: Tiro | Lines: 13 | Source: scriptblocchi.py


Example 18: NWsc

def NWsc(M, object=False):
    X = np.copy(M)
    dM = spl.det(M)**(1.0/2)
    ras = result()
    for i in range(1,31):
        print i
        uk = np.abs(spl.det(X)/dM)**(-1.0/i)
        X = (0.5)*(uk*X + (uk**(-1))*spl.inv(X).dot(M))
        print spl.norm(X.dot(X)-M)/spl.norm(M)
        ras.res.append((spl.norm(X.dot(X)-M)/spl.norm(M)))
    ras.iter = i
    ras.ris = X
    return (ras if object else X)
Developer: sn1p3r46 | Project: Tiro | Lines: 13 | Source: TestCapitolo3.py


Example 19: _produceSamples

    def _produceSamples(self):
        """ Append batchsize new samples and evaluate them. """
        if self.numLearningSteps == 0 or not self.importanceMixing:
            for _ in range(self.batchSize):
                self._produceNewSample()
            self.allGenerated.append(self.batchSize + self.allGenerated[-1])
        else:
            olds = len(self.allSamples)
            oldDetFactorSigma = det(self.allFactorSigmas[-2])
            newDetFactorSigma = det(self.factorSigma)
            invA = inv(self.factorSigma)
    
            # All pdfs computed here are off by a coefficient of 1/power(2.0*pi, self.numDistrParams/2.)
            # but as only their relative values matter, we ignore it.
            
            # stochastically reuse old samples, according to the change in distribution
            for s in range(olds - self.batchSize, olds):
                oldPdf = exp(-0.5 * dot(self.allPs[s], self.allPs[s])) / oldDetFactorSigma
                sample = self.allSamples[s]
                newPs = dot(invA.T, (sample - self.x))
                newPdf = exp(-0.5 * dot(newPs, newPs)) / newDetFactorSigma
                r = rand()
                if r < (1 - self.forcedRefresh) * newPdf / oldPdf:
                    self.allSamples.append(sample)
                    self.allFitnesses.append(self.allFitnesses[s])
                    self.allPs.append(newPs)
                # never use only old samples
                if (olds + self.batchSize) - len(self.allSamples) < self.batchSize * self.forcedRefresh:
                    break
            self.allGenerated.append(self.batchSize - (len(self.allSamples) - olds) + self.allGenerated[-1])

            # add the remaining ones
            oldInvA = inv(self.allFactorSigmas[-2])
            while  len(self.allSamples) < olds + self.batchSize:
                r = rand()
                if r < self.forcedRefresh:
                    self._produceNewSample()
                else:
                    while True:
                        p = randn(self.numParameters)
                        newPdf = exp(-0.5 * dot(p, p)) / newDetFactorSigma
                        sample = dot(self.factorSigma.T, p) + self.x
                        sample = array(map(float,sample[:lt.Dsize-1])+map(degree_bound,sample[lt.Dsize-1:]))
                        if useNearest:
                            sample = array(nearestPoint(sample[:lt.Dsize-1])+map(float,sample[lt.Dsize-1:]))
                        if validDist(sample):
                            break
                    oldPs = dot(oldInvA.T, (sample - self.allCenters[-2]))
                    oldPdf = exp(-0.5 * dot(oldPs, oldPs)) / oldDetFactorSigma
                    if r < 1 - oldPdf / newPdf:
                        self._produceNewSample(sample, p)
Developer: klandor | Project: CodeSim | Lines: 51 | Source: NES_fit2.py


Example 20: _getBetaT

   def _getBetaT(self,XStar,Ap,D,Yt):
      P = []
      for i in range(self.M): P += (self.Kva + D[i]).tolist()
      P = np.array(P)
      L = np.kron(Ap,XStar)
      A = L.T * 1.0/(P+1.0)
      B = np.dot(A,L)
      Bi = linalg.inv(B)
      beta = np.dot(np.dot(Bi,A),Yt)

      _REML_part = np.log(linalg.det(np.dot(L.T,L))) + np.log(linalg.det(B))

      mu = np.dot(L,beta)
      return beta,mu,Bi,_REML_part
Developer: nickFurlotte | Project: mvLMM | Lines: 14 | Source: mvLMM.py



Note: The scipy.linalg.det examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.

