This article collects typical usage examples of Python's numpy.trace function. If you have been wondering what the trace function does, how to call it, or what it looks like in real code, the curated examples below should help.
Twenty code examples of the trace function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
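As a quick refresher before the project-level examples: numpy.trace returns the sum of an array's diagonal elements, with an optional offset selecting an off-diagonal. A minimal sketch:

import numpy as np

a = np.arange(9).reshape(3, 3)
print(np.trace(a))            # 0 + 4 + 8 = 12 (main diagonal)
print(np.trace(a, offset=1))  # 1 + 5 = 6 (first superdiagonal)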
Example 1: step
def step(self, x, last_b):
    # initialize
    m = len(x)
    mu = np.matrix(last_b).T
    sigma = self.sigma
    theta = self.theta
    eps = self.eps
    x = np.matrix(x).T    # matrices are easier to manipulate

    # 4. Calculate the following variables
    M = mu.T * x
    V = x.T * sigma * x
    x_upper = sum(np.diag(sigma) * x) / np.trace(sigma)

    # 5. Update the portfolio distribution
    mu, sigma = self.update(x, x_upper, mu, sigma, M, V, theta, eps)

    # 6. Normalize mu and sigma
    mu = tools.simplex_proj(mu)
    sigma = sigma / (m**2 * np.trace(sigma))
    """
    sigma(sigma < 1e-4*eye(m)) = 1e-4;
    """
    self.sigma = sigma
    return mu
Author: gofia, Project: universal-portfolios, Lines: 25, Source: cwmr.py
Example 2: fid
def fid(target_unitary, error_channel_operators, density_matrix, symbolic=1):
    """Fidelity between a unitary gate and a not-necessarily-unitary gate,
    for a given initial density matrix. This is later used when calculating
    the worst-case fidelity.

    Notice that the input format of the general channel is a list of Kraus
    operators instead of a process matrix. The input format of the target
    unitary is just the matrix itself, not its process matrix.

    symbolic = 1 is used when the input matrices are sympy,
    while symbolic = 0 is used when the input matrices are numpy.
    """
    V, K, rho = target_unitary, error_channel_operators, density_matrix
    if symbolic:
        Tra = (((V.H)*K[0])*rho).trace()
        fid = Tra*(fun.conjugate(Tra))
        for i in range(1, len(K)):
            Tra = (((V.H)*K[i])*rho).trace()
            fid += Tra*(fun.conjugate(Tra))
        return fid.expand()
    else:
        Tra = np.trace((V.H)*K[0]*rho)
        fid = Tra*(Tra.conjugate())
        for i in range(1, len(K)):
            Tra = np.trace((V.H)*K[i]*rho)
            fid += Tra*(Tra.conjugate())
        return fid
Author: maiwol, Project: Exact-Simulator, Lines: 25, Source: Approx_Errors.py
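Written out, the quantity both branches accumulate is

F(\rho) = \sum_i \left| \operatorname{Tr}\!\left( V^{\dagger} K_i \rho \right) \right|^2,

the squared magnitudes of the overlap traces between the target unitary V and each Kraus operator K_i, weighted by the initial state \rho.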
Example 3: test_mapping_cost
def test_mapping_cost(
        self,
        other,
        bend_coef=DEFAULT_LAMBDA[1],
        outlierprior=1e-1,
        outlierfrac=1e-2,
        outliercutoff=1e-2,
        T=5e-3,
        norm_iters=DEFAULT_NORM_ITERS,
):
    mapping_err = self.mapping_cost(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
    for i in range(self.N):
        ## compute error for 0 on cpu
        s_gpu = mapping_err[i]
        s_cpu = np.float32(0)
        xt = self.pts_t[i].get()
        xw = self.pts_w[i].get()
        yt = other.pts_t[i].get()
        yw = other.pts_w[i].get()
        ## use the trace b/c then numpy will use float32s all the way
        s_cpu += np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt))
        s_cpu += np.trace(yt.T.dot(yt) + yw.T.dot(yw) - 2 * yw.T.dot(yt))
        if not np.isclose(s_cpu, s_gpu, atol=1e-4):
            ## high err tolerance is b/c of difference in cpu and gpu precision?
            print("cpu and gpu sum sq differences differ!!!")
            ipy.embed()
            sys.exit(1)
Author: rll, Project: lfd, Lines: 30, Source: batchtps.py
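The two np.trace lines rely on the Frobenius-norm identity \|X - Y\|_F^2 = \operatorname{Tr}(X^\top X) + \operatorname{Tr}(Y^\top Y) - 2\operatorname{Tr}(Y^\top X). A minimal standalone check of that identity (the arrays here are made up for illustration):

import numpy as np

x = np.random.rand(5, 3)
y = np.random.rand(5, 3)
# sum of squared differences via the trace identity used in the test
via_trace = np.trace(x.T.dot(x) + y.T.dot(y) - 2 * y.T.dot(x))
direct = np.linalg.norm(x - y) ** 2
assert np.isclose(via_trace, direct)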
Example 4: __update_tau
def __update_tau(self, X):
    """
    Update b_tau_tilde, as a_tau_tilde is independent of the other update rules.

    b_tau_tilde = b_tau + 1/2 sum( Z )

    where Z =
        || X_n ||^2 + <|| mu ||^2> + Tr(<W.T * W> <z_n * z_n.T>) +
        2*<mu.T> * <W> * <z_n> - 2 * X_n.T * <W> * <z_n> - 2 * X_n.T * <mu>
    """
    x_norm_sq = np.power(np.linalg.norm(X, axis=0), 2)
    # <|mu|^2> = <mu.T mu> = Tr(Sigma_mu) + mean_mu.T mean_mu
    exp_mu_norm_sq = np.trace(self.sigma_mu) + np.dot(self.mean_mu.T, self.mean_mu)
    exp_mu_norm_sq = exp_mu_norm_sq[0]  # reshape from (1,1) to (1,)
    # TODO what is <W.T W>
    exp_w = self.means_w
    exp_wt_w = np.dot(exp_w.T, exp_w)  # TODO fix
    exp_z_zt = self.N * self.sigma_z + np.dot(self.means_z, self.means_z.T)
    trace_w_z = np.trace(np.dot(exp_wt_w, exp_z_zt))
    mu_w_z = np.dot(np.dot(self.mean_mu.T, self.means_w), self.means_z)
    x_w_z = np.dot(X.T, self.means_w).T * self.means_z
    x_mu = np.dot(X.T, self.mean_mu)
    big_sum = np.sum(x_norm_sq) + self.N * exp_mu_norm_sq + trace_w_z + \
        2*np.sum(mu_w_z) - 2*np.sum(x_w_z) - 2*np.sum(x_mu)
    self.b_tau_tilde = self.b_tau + 0.5*big_sum
Author: macabot, Project: mlpm_lab, Lines: 31, Source: vpca.py
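The trace manipulations here follow the standard second-moment identities for Gaussian posteriors,

\mathbb{E}[\|\mu\|^2] = \operatorname{Tr}(\Sigma_\mu) + \bar\mu^\top \bar\mu, \qquad \sum_n \mathbb{E}[z_n z_n^\top] = N\,\Sigma_z + \bar Z \bar Z^\top,

which is why exp_z_zt sums N copies of sigma_z with the outer product of the posterior means before the trace against <W.T W> is taken.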
Example 5: confmat
def confmat(self, inputs, targets):
    """Confusion matrix"""
    # Add the inputs that match the bias node
    inputs = np.concatenate((inputs, -np.ones((self.nData, 1))), axis=1)
    outputs = np.dot(inputs, self.weights)
    nClasses = np.shape(targets)[1]
    if nClasses == 1:
        nClasses = 2
        outputs = np.where(outputs > 0, 1, 0)
    else:
        # 1-of-N encoding
        outputs = np.argmax(outputs, 1)
        targets = np.argmax(targets, 1)
    cm = np.zeros((nClasses, nClasses))
    for i in range(nClasses):
        for j in range(nClasses):
            cm[i, j] = np.sum(np.where(outputs == i, 1, 0) * np.where(targets == j, 1, 0))
    print(cm)
    print(np.trace(cm) / np.sum(cm))
Author: dnorth, Project: MachineLearning, Lines: 25, Source: test_perceptron.py
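The final line is the classification accuracy: correct predictions lie on the diagonal of the confusion matrix, so np.trace(cm)/np.sum(cm) is the fraction classified correctly. A quick illustration with a hypothetical 2x2 matrix:

import numpy as np

cm = np.array([[40, 10],
               [5, 45]])
accuracy = np.trace(cm) / np.sum(cm)  # (40 + 45) / 100 = 0.85
print(accuracy)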
Example 6: grad_log_like
def grad_log_like(phis, *args):
    x_train, t_train = args
    # init the matrices for the derivatives of each phi according to each pair of data points
    dert0 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert1 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert2 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert3 = np.zeros((x_train.shape[0], x_train.shape[0]))
    # vector of the final result of the derivatives
    der = np.zeros_like(phis)
    K = computeK_opt(x_train, x_train, phis)
    C = computeC(K, beta)
    invC = np.linalg.inv(C)
    for i in range(len(x_train)):
        for j in range(len(x_train)):
            dert0[i,j] = np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*np.exp(phis[0])
            dert1[i,j] = -0.5*np.exp(phis[0])*np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*((x_train[i] - x_train[j])**2)*np.exp(phis[1])
            dert2[i,j] = np.exp(phis[2])
            dert3[i,j] = x_train[i]*x_train[j]*np.exp(phis[3])
    # get the derivatives of the negative log-likelihood
    der[0] = -(((-1/2)*np.trace(np.dot(invC, dert0))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert0), invC),t_train)))
    der[1] = -(((-1/2)*np.trace(np.dot(invC, dert1))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert1), invC),t_train)))
    der[2] = -(((-1/2)*np.trace(np.dot(invC, dert2))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert2), invC),t_train)))
    der[3] = -(((-1/2)*np.trace(np.dot(invC, dert3))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert3), invC),t_train)))
    return der
Author: clouizos, Project: Machine-Learning, Lines: 28, Source: lab4.py
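Each der[k] line instantiates the standard gradient of the Gaussian-process log marginal likelihood with respect to a kernel hyperparameter \theta_k (negated here, since the function returns derivatives of the negative log-likelihood):

\frac{\partial \ln p(\mathbf{t}\mid\boldsymbol{\theta})}{\partial \theta_k} = -\frac{1}{2}\operatorname{Tr}\!\left(C^{-1}\frac{\partial C}{\partial \theta_k}\right) + \frac{1}{2}\,\mathbf{t}^\top C^{-1}\,\frac{\partial C}{\partial \theta_k}\,C^{-1}\mathbf{t}.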
Example 7: _fit
def _fit(self, cov_a, cov_b):
    """Aux Function (modifies cov_a and cov_b in-place)."""
    cov_a /= np.trace(cov_a)
    cov_b /= np.trace(cov_b)
    # compute the eigenvalues
    lambda_, u = linalg.eigh(cov_a + cov_b)
    # sort them
    ind = np.argsort(lambda_)[::-1]
    lambda2_ = lambda_[ind]
    u = u[:, ind]
    p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)
    # compute the generalized eigenvalue problem
    w_a = np.dot(np.dot(p, cov_a), p.T)
    w_b = np.dot(np.dot(p, cov_b), p.T)
    # and solve it
    vals, vecs = linalg.eigh(w_a, w_b)
    # sort vectors by discriminative power using the eigenvalues
    ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
    vecs = vecs[:, ind]
    # and project
    w = np.dot(vecs.T, p)
    self.filters_ = w
    self.patterns_ = linalg.pinv(w).T
Author: rajul, Project: mne-python, Lines: 26, Source: csp.py
Example 8: m_step_Q
def m_step_Q(emd, stationary):
    """
    Computes the optimised state-transition covariance hyperparameters `Q' of
    the natural parameters of the posterior distributions over time. Here
    just one single scalar is considered.

    :param container.EMData emd:
        All data pertaining to the EM algorithm.
    :param stationary:
        If 'all', stationarity is assumed for all thetas.
    """
    inv_lmbda = 0
    if emd.param_est_eta == 'exact':
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.trace(emd.sigma_s[i, :, :]) - \
                2 * numpy.trace(lag_one_covariance) + \
                numpy.trace(emd.sigma_s[i - 1, :, :]) + \
                numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * numpy.identity(emd.D)
    else:
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.sum(emd.sigma_s[i]) - \
                2 * numpy.sum(lag_one_covariance) + \
                numpy.sum(emd.sigma_s[i - 1]) + \
                numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * numpy.identity(emd.D)
    if stationary == 'all':
        emd.Q = numpy.zeros(emd.Q.shape)
Author: christiando, Project: ssll_lib, Lines: 33, Source: exp_max.py
Example 9: maxwell_sihvola
def maxwell_sihvola(self, dielectric_medium, dielecv, shape, L, vf):
    """Calculate the effective permittivity using the Maxwell Garnett method.

    dielectric_medium is the dielectric constant tensor of the medium
    dielecv is the total frequency dielectric constant tensor at the current frequency
    shape is the name of the current shape
    L is the shape's depolarisation matrix
    vf is the volume fraction of filler
    The routine returns the effective dielectric constant."""
    # Equation 6.29 on page 123 of Sihvola
    # Equation 6.40 gives the averaging over the orientation function
    # See also equation 5.80 on page 102 and equation 4.31 on page 70
    Me = dielectric_medium
    # assume the medium is isotropic and calculate the inverse of the dielectric
    Mem1 = 3.0 / np.trace(Me)
    Mi = dielecv
    # calculate the polarisability matrix times the number density of inclusions
    nA = vf*np.dot((Mi-Me), np.linalg.inv(self.unit + (Mem1 * np.dot(L, (Mi - Me)))))
    nAL = np.dot(nA, L)
    # average the polarisability over orientation
    nA = np.trace(nA) / 3.0 * self.unit
    # average the polarisability*L over orientation
    nAL = np.trace(nAL) / 3.0 * Mem1 * self.unit
    # Calculate the average polarisation factor which scales the average field
    # based on equation 5.80
    # <P> = pol . <E>
    pol = np.dot(np.linalg.inv(self.unit - nAL), nA)
    # Meff . <E> = Me . <E> + <P>
    #            = Me . <E> + pol . <E>
    # => Meff = Me + pol
    effd = dielectric_medium + pol
    # average over orientation
    trace = np.trace(effd) / 3.0
    effdielec = np.array([[trace, 0, 0], [0, trace, 0], [0, 0, trace]])
    return effdielec
Author: kcantosh, Project: PDielec, Lines: 34, Source: DielectricConstant.py
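The recurring pattern np.trace(M)/3.0 * unit is the isotropic orientation average of a second-rank tensor,

\langle M \rangle = \tfrac{1}{3}\operatorname{Tr}(M)\, I,

applied here to the polarisability, to the polarisability times L, and finally to the effective dielectric tensor.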
Example 10: test_pullback
def test_pullback(self):
    (D, P, N) = 2, 5, 10
    A_data = numpy.zeros((D, P, N, N))
    for d in range(D):
        for p in range(P):
            tmp = numpy.random.rand(N, N)
            A_data[d, p, :, :] = numpy.dot(tmp.T, tmp)
            if d == 0:
                A_data[d, p, :, :] += N * numpy.diag(numpy.random.rand(N))
    A = UTPM(A_data)
    l, Q = UTPM.eigh(A)
    L_data = UTPM._diag(l.data)
    L = UTPM(L_data)
    assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L, Q.T)).data, A.data, decimal=13)
    lbar = UTPM(numpy.random.rand(*(D, P, N)))
    Qbar = UTPM(numpy.random.rand(*(D, P, N, N)))
    Abar = UTPM.pb_eigh(lbar, Qbar, A, l, Q)
    Abar = Abar.data[0, 0]
    Adot = A.data[1, 0]
    Lbar = UTPM._diag(lbar.data)[0, 0]
    Ldot = UTPM._diag(l.data)[1, 0]
    Qbar = Qbar.data[0, 0]
    Qdot = Q.data[1, 0]
    assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace(numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Author: eteq, Project: algopy, Lines: 34, Source: test_utpm.py
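The closing assertion (shared by the next two examples) checks the defining invariant of a reverse-mode pullback: the trace inner products of adjoints with tangents must agree across the operation,

\operatorname{Tr}(\bar A^\top \dot A) = \operatorname{Tr}(\bar L^\top \dot L) + \operatorname{Tr}(\bar Q^\top \dot Q).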
Example 11: test_pullback_repeated_eigenvalues
def test_pullback_repeated_eigenvalues(self):
    D, P, N = 2, 1, 6
    A = UTPM(numpy.zeros((D, P, N, N)))
    V = UTPM(numpy.random.rand(D, P, N, N))
    A.data[0, 0] = numpy.diag([2, 2, 3, 3., 4, 5])
    A.data[1, 0] = numpy.diag([5, 1, 3, 1., 1, 3])
    V, Rtilde = UTPM.qr(V)
    A = UTPM.dot(UTPM.dot(V.T, A), V)
    l, Q = UTPM.eigh(A)
    L_data = UTPM._diag(l.data)
    L = UTPM(L_data)
    assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L, Q.T)).data, A.data, decimal=13)
    lbar = UTPM(numpy.random.rand(*(D, P, N)))
    Qbar = UTPM(numpy.random.rand(*(D, P, N, N)))
    Abar = UTPM.pb_eigh(lbar, Qbar, A, l, Q)
    Abar = Abar.data[0, 0]
    Adot = A.data[1, 0]
    Lbar = UTPM._diag(lbar.data)[0, 0]
    Ldot = UTPM._diag(l.data)[1, 0]
    Qbar = Qbar.data[0, 0]
    Qdot = Q.data[1, 0]
    assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace(numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Author: eteq, Project: algopy, Lines: 33, Source: test_utpm.py
Example 12: test_eigh1_pushforward
def test_eigh1_pushforward(self):
    (D, P, N) = 2, 1, 2
    A = UTPM(numpy.zeros((D, P, N, N)))
    A.data[0, 0] = numpy.eye(N)
    A.data[1, 0] = numpy.diag([3, 4])
    L, Q, b = UTPM.eigh1(A)
    assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L, Q.T)).data, A.data, decimal=13)
    Lbar = UTPM.diag(UTPM(numpy.zeros((D, P, N))))
    Lbar.data[0, 0] = [0.5, 0.5]
    Qbar = UTPM(numpy.random.rand(*(D, P, N, N)))
    Abar = UTPM.pb_eigh1(Lbar, Qbar, None, A, L, Q, b)
    Abar = Abar.data[0, 0]
    Adot = A.data[1, 0]
    Lbar = Lbar.data[0, 0]
    Ldot = L.data[1, 0]
    Qbar = Qbar.data[0, 0]
    Qdot = Q.data[1, 0]
    assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace(numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Author: eteq, Project: algopy, Lines: 26, Source: test_utpm.py
Example 13: test_det_ovlp
def test_det_ovlp(self):
    mf = scf.UHF(mol)
    mf.scf()
    s, x = mf.det_ovlp(mf.mo_coeff, mf.mo_coeff, mf.mo_occ, mf.mo_occ)
    self.assertAlmostEqual(s, 1.000000000, 9)
    self.assertAlmostEqual(numpy.trace(x[0]), mf.nelec[0]*1.000000000, 9)
    self.assertAlmostEqual(numpy.trace(x[1]), mf.nelec[1]*1.000000000, 9)
Author: eronca, Project: pyscf, Lines: 7, Source: test_uhf.py
Example 14: grad_nlogprob
def grad_nlogprob(hypers):
    amp2 = np.exp(hypers[0])
    noise = np.exp(hypers[1])
    ls = np.exp(hypers[2:])

    chol, corr, grad_corr = memoize(amp2, noise, ls)
    solve = spla.cho_solve((chol, True), diffs)
    inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))
    jacobian = np.outer(solve, solve) - inv_cov

    grad = np.zeros(self.D + 2)

    # Log amplitude gradient.
    grad[0] = 0.5 * np.trace(np.dot(jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2

    # Log noise gradient.
    grad[1] = 0.5 * np.trace(np.dot(jacobian, np.eye(chol.shape[0]))) * noise

    # Log length scale gradients.
    for dd in range(self.D):
        grad[dd+2] = 1 * np.trace(np.dot(jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])

    # Roll in the prior variance.
    #grad -= 2*hypers/self.hyper_prior

    return -grad
Author: ninjin, Project: spearmint-lite, Lines: 27, Source: gp.py
Example 15: __calcMergeCost
def __calcMergeCost(self, weightA, meanA, precA, weightB, meanB, precB):
    """Calculates and returns the cost of merging two Gaussians."""
    # (For anyone wondering why we compare the two components against each
    # other rather than against the result of merging them: this way tends
    # to give better results.)

    # The log determinants and delta...
    logDetA = math.log(numpy.linalg.det(precA))
    logDetB = math.log(numpy.linalg.det(precB))
    delta = meanA - meanB

    # Kullback-Leibler of representing A using B...
    klA = logDetB - logDetA
    klA += numpy.trace(numpy.dot(precB, numpy.linalg.inv(precA)))
    klA += numpy.dot(numpy.dot(delta, precB), delta)
    klA -= precA.shape[0]
    klA *= 0.5

    # Kullback-Leibler of representing B using A...
    klB = logDetA - logDetB
    klB += numpy.trace(numpy.dot(precA, numpy.linalg.inv(precB)))
    klB += numpy.dot(numpy.dot(delta, precA), delta)
    klB -= precB.shape[0]
    klB *= 0.5

    # Return a weighted average...
    return weightA * klA + weightB * klB
Author: PeterZhouSZ, Project: helit, Lines: 25, Source: kde_inc.py
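Written out as implemented, with precision matrices P_A, P_B, dimension d, and \delta = \mu_A - \mu_B, the per-pair cost is

\mathrm{kl}_A = \tfrac{1}{2}\left[\ln\det P_B - \ln\det P_A + \operatorname{Tr}\!\left(P_B P_A^{-1}\right) + \delta^\top P_B\,\delta - d\right],

with \mathrm{kl}_B obtained by swapping A and B; the trace term measures how well one component's covariance accounts for the other's.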
Example 16: _brug_minimise_scalar
def _brug_minimise_scalar(self, variables, eps1, eps2, shape, L, f1):
    # unpack the complex number from the variables
    # two things are going on here:
    # 1. the two variables refer to the real and imaginary components
    # 2. we require the imaginary component to be positive
    trace = complex(variables[0], np.exp(variables[1])-1.0)
    epsbr = np.array([[trace, 0, 0], [0, trace, 0], [0, 0, trace]])
    f2 = 1.0 - f1
    b1 = np.dot(L, (eps1 - epsbr))
    b2 = np.dot(L, (eps2 - epsbr))
    tb1 = np.trace(b1)/3.0
    tb2 = np.trace(b2)/3.0
    ta1 = 1.0/(1.0 + tb1)
    ta2 = 1.0/(1.0 + tb2)
    c1 = eps1-epsbr
    c2 = eps2-epsbr
    tc1 = np.trace(c1)/3.0
    tc2 = np.trace(c2)/3.0
    # alpha1 and alpha2 are the polarisabilities of 1 and 2 in the effective medium
    talpha1 = tc1 * ta1
    talpha2 = tc2 * ta2
    error = f1*talpha1 + f2*talpha2
    error = np.abs(error.conjugate() * error)
    # Nasty issue in the Powell method: the convergence tolerance is taken
    # relative to the solution, which here would be 0.0 (only a small number
    # is added). So we shift the solution by 1.0; tol is now relative to 1.0.
    return 1.0+error
Author: kcantosh, Project: PDielec, Lines: 27, Source: DielectricConstant.py
Example 17: SolveResponse
def SolveResponse(HamDMET, NelecActiveSpace, orb_i, omega, eta, toSolve, GSenergy, GSvector, printoutput=False):
    GFvalue, Re2RDMresponse, Im2RDMresponse, A_2RDMresponse = SolveResponseBASE(
        HamDMET, NelecActiveSpace, orb_i, omega, eta, toSolve, GSenergy, GSvector, printoutput
    )
    # Calculate the 1RDMs from the 2RDMs
    RDM_A = np.einsum("ikjk->ij", A_2RDMresponse)
    RDM_R = np.einsum("ikjk->ij", Re2RDMresponse)
    RDM_I = np.einsum("ikjk->ij", Im2RDMresponse)
    if (toSolve == "F") or (toSolve == "B"):
        elecNum = NelecActiveSpace
    if toSolve == "A":
        elecNum = NelecActiveSpace + 1
    if toSolve == "R":
        elecNum = NelecActiveSpace - 1
    # Now rescale the response 1RDMs as if calculated from a normalized wave function
    norm_A = np.trace(RDM_A) / elecNum
    norm_R = np.trace(RDM_R) / elecNum
    norm_I = np.trace(RDM_I) / elecNum
    RDM_A = RDM_A / norm_A
    RDM_R = RDM_R / norm_R
    RDM_I = RDM_I / norm_I
    return (GFvalue, RDM_A, RDM_R, RDM_I, norm_A, norm_R, norm_I)
Author: BB-Goldstein, Project: PyDMET, Lines: 27, Source: SolveCorrelated.py
Example 18: _prepare_data
def _prepare_data(self, k_point=None):
    """
    Sets all necessary fields for 1D calculations. Sorts atom indices to improve parallelism.
    :returns: number of atoms, sorted atom indices
    """
    # load powder data for one k-point
    clerk = AbinsModules.IOmodule(input_filename=self._input_filename,
                                  group_name=AbinsModules.AbinsParameters.powder_data_group)
    powder_data = clerk.load(list_of_datasets=["powder_data"])
    self._a_tensors = powder_data["datasets"]["powder_data"]["a_tensors"][k_point]
    self._b_tensors = powder_data["datasets"]["powder_data"]["b_tensors"][k_point]
    self._a_traces = np.trace(a=self._a_tensors, axis1=1, axis2=2)
    self._b_traces = np.trace(a=self._b_tensors, axis1=2, axis2=3)
    # load dft data for one k-point
    clerk = AbinsModules.IOmodule(input_filename=self._input_filename,
                                  group_name=AbinsModules.AbinsParameters.ab_initio_group)
    dft_data = clerk.load(list_of_datasets=["frequencies", "weights"])
    frequencies = dft_data["datasets"]["frequencies"][int(k_point)]
    indx = frequencies > AbinsModules.AbinsConstants.ACOUSTIC_PHONON_THRESHOLD
    self._fundamentals_freq = frequencies[indx]
    self._weight = dft_data["datasets"]["weights"][int(k_point)]
    # free memory
    gc.collect()
Author: DanNixon, Project: mantid, Lines: 27, Source: SPowderSemiEmpiricalCalculator.py
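The axis1/axis2 keywords make np.trace operate over a chosen pair of axes, producing one trace per matrix in a stack; that is how the a_tensors and b_tensors lines reduce whole arrays of per-atom tensors at once. A minimal illustration (shapes invented for the demo):

import numpy as np

a_tensors = np.random.rand(4, 3, 3)            # e.g. 4 atoms, one 3x3 tensor each
traces = np.trace(a_tensors, axis1=1, axis2=2)
print(traces.shape)                            # (4,) -- one trace per atom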
Example 19: compute_energy
def compute_energy(self):
    """
    Compute the RHF energy.
    :return: energy
    """
    for i in range(self.maxiter):
        D0 = np.trace(self.D)
        h = self.T + self.V
        j = np.einsum('mrns,rs', self.g, self.D)
        k = np.einsum('msrn,rs', self.g, self.D)
        v = j - .5*k
        f = h + v
        ft = np.dot(self.X, np.dot(f, self.X))
        e, Ct = la.eigh(ft)
        C = np.dot(self.X, Ct)
        OC = C[:, :self.ndocc]
        self.D = 2*np.dot(OC, OC.T)
        T = h + .5*v
        E = np.dot(T, self.D)
        energy = np.trace(E) + self.V_nuc
        if abs(D0 - np.trace(self.D)) < self.e_convergence:
            break
    self.energy = energy
    print('Final RHF Energy:')
    print(self.energy)
    return energy
Author: CCQC, Project: summer-program, Lines: 27, Source: rhf.py
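The energy line is the trace form of the closed-shell Hartree-Fock electronic energy,

E_{\mathrm{RHF}} = \operatorname{Tr}\!\left[\left(h + \tfrac{1}{2}v\right)D\right] + V_{\mathrm{nuc}},

while the change in np.trace(self.D) between iterations serves as the loop's convergence check.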
Example 20: stein_estimator
def stein_estimator(cov, precision, nsim=1, nbin=1, biased_precision=True):
    """Stein estimator

    Parameters
    ----------
    cov: numpy array
        covariance
    precision: numpy array
        inverse covariance
    nsim: int
        number of simulations (default 1)
    nbin: int
        number of bins (default 1)
    biased_precision: bool
        use the Hartlap correction for the inverse covariance (default True)

    Returns
    -------
    numpy array
        Stein estimator
    """
    if biased_precision:
        stein = (nsim-nbin-2.)/(nsim-1.)*precision + (nbin*(nbin+1)-2.)/((nsim-1.)*np.trace(cov))*np.eye(nbin)
    else:
        stein = precision + (nbin*(nbin+1)-2.)/((nsim-1.)*np.trace(cov))*np.eye(nbin)
    return stein
Author: lindablot, Project: deus_pur, Lines: 27, Source: wishart_analysis.py
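In formula form, with n_s simulations, n_b bins, sample covariance C and estimated precision \Psi, the biased_precision branch computes

\hat\Psi_{\mathrm{Stein}} = \frac{n_s - n_b - 2}{n_s - 1}\,\Psi + \frac{n_b(n_b+1) - 2}{(n_s - 1)\operatorname{Tr}(C)}\, I,

shrinking the Hartlap-corrected precision matrix towards a multiple of the identity set by the trace of the covariance.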
Note: The numpy.trace examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce without permission.