This article collects typical usage examples of the numpy.tril_indices function in Python. If you are wondering what tril_indices does, how to call it, or what real-world code using it looks like, the curated examples below should help.
The following shows 20 code examples of the tril_indices function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
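Before the examples, here is a minimal stand-alone sketch (not taken from any of the projects below) of what numpy.tril_indices itself returns: a pair of index arrays addressing the lower triangle of an n x n matrix, usable both for reading and for assigning values.

import numpy as np

# Index arrays for the lower triangle (including the diagonal) of a 3x3 matrix.
rows, cols = np.tril_indices(3)
# rows -> array([0, 1, 1, 2, 2, 2]), cols -> array([0, 0, 1, 0, 1, 2])

a = np.arange(9).reshape(3, 3)
print(a[rows, cols])         # lower-triangular entries: [0 3 4 6 7 8]

# The optional second argument shifts the diagonal; k=-1 excludes it.
strict = np.tril_indices(3, -1)
a[strict] = 0                # zero out the strictly lower triangle
print(a)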
Example 1: actor_critic_update
def actor_critic_update(X0, A, R, X1, gamma=0.99, learnrate=0.0001):
    global WP1, WP2, WV_A, WV_b, WV
    global grad_WP1, grad_WP2, grad_N
    global grad_WP1s, grad_WP2s
    global noise_cov

    S0 = XtoS(X0)
    S1 = XtoS(X1)

    SS0 = np.outer(S0, S0)[np.tril_indices(7)]
    SS1 = np.outer(S1, S1)[np.tril_indices(7)]

    deltaV = SS0 - gamma * SS1
    WV_A += np.outer(deltaV, deltaV)
    WV_b += R * deltaV
    WV = np.linalg.solve(WV_A, WV_b)

    advantage = R + gamma * np.dot(SS1, WV) - np.dot(SS0, WV)

    g1 = -0.5*np.outer(A, A) * advantage
    grad_WP1 += g1
    grad_WP1s += g1*g1
    g2 = np.outer(A, SS0) * advantage
    grad_WP2 += g2
    grad_WP2s += g2*g2
    grad_N += 1

    # if X0[4] > 4:
    #     print 'V(s0)', np.dot(SS0, WV), 'V(s1)', np.dot(SS1, WV), 'R', R
    #     print 'A', A, 'adv', advantage  # , 'g_wp1\n', grad_WP1, 'g_wp2\n', grad_WP2

    noise_cov = np.linalg.inv(np.linalg.cholesky(
        WP1 + 1e-2 + np.eye(2))).T
Author: a1k0n, Project: autorustler, Lines: 28, Source file: sim.py
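The SS0/SS1 features above flatten an outer product into its unique lower-triangular entries. The following stand-alone sketch isolates that trick; the vector s and the size 3 are made up for illustration and are not part of the original project.

import numpy as np

s = np.array([1.0, 2.0, 3.0])              # a made-up 3-dimensional state vector
phi = np.outer(s, s)[np.tril_indices(3)]   # unique quadratic terms s_i * s_j for i >= j
print(phi)                                 # [1. 2. 4. 3. 6. 9.]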
Example 2: __init__
def __init__(self, value):
    self.value = value
    self.value_ = np.linalg.cholesky(value)[np.tril_indices(value.shape[0])]
    self.shape = value.shape
    self.size = value.size
    self.free = np.resize(True, self.value_.shape)
    self.to_external = lambda val: np.linalg.cholesky(val)[np.tril_indices(self.shape[0])]
Author: jtorcasso, Project: OptimController, Lines: 7, Source file: parameters.py
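Example 2 stores only the lower-triangular part of a Cholesky factor as a flat vector (the to_external direction). Below is a hedged sketch of the reverse step, rebuilding the full matrix from that packed vector; the variable names and values are illustrative, not from the original project.

import numpy as np

value = np.array([[4.0, 2.0],
                  [2.0, 3.0]])                            # a positive definite matrix
packed = np.linalg.cholesky(value)[np.tril_indices(2)]    # flat lower-triangular Cholesky factor

# Reconstruct: unpack into a lower-triangular matrix L, then form L L^T.
L = np.zeros((2, 2))
L[np.tril_indices(2)] = packed
restored = L.dot(L.T)
print(np.allclose(restored, value))                       # True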
Example 3: find_stationary_var
def find_stationary_var(amat=None, bmat=None, cmat=None):
    """Find fixed point of H = CC' + AHA' + BHB' given A, B, C.

    Parameters
    ----------
    amat, bmat, cmat : (nstocks, nstocks) arrays
        Parameter matrices

    Returns
    -------
    (nstocks, nstocks) array
        Unconditional variance matrix

    """
    nstocks = amat.shape[0]
    kwargs = {'amat': amat, 'bmat': bmat, 'ccmat': cmat.dot(cmat.T)}
    fun = partial(ParamGeneric.fixed_point, **kwargs)
    try:
        with np.errstate(divide='ignore', invalid='ignore'):
            hvar = np.eye(nstocks)
            sol = sco.fixed_point(fun, hvar[np.tril_indices(nstocks)])
            hvar[np.tril_indices(nstocks)] = sol
            hvar[np.triu_indices(nstocks, 1)] \
                = hvar.T[np.triu_indices(nstocks, 1)]
            return hvar
    except RuntimeError:
        # warnings.warn('Could not find stationary variance!')
        return None
Author: khrapovs, Project: bekk, Lines: 28, Source file: param_generic.py
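find_stationary_var solves for the lower-triangular entries only and then mirrors them into the upper triangle. Here is a small self-contained sketch of that fill-and-mirror pattern, with made-up values:

import numpy as np

n = 3
vec = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])    # n*(n+1)/2 lower-triangular values

hvar = np.zeros((n, n))
hvar[np.tril_indices(n)] = vec                                 # fill the lower triangle
hvar[np.triu_indices(n, 1)] = hvar.T[np.triu_indices(n, 1)]    # mirror into the upper triangle
print(hvar)   # symmetric: [[1. 2. 4.] [2. 3. 5.] [4. 5. 6.]]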
Example 4: test_frozen
def test_frozen(self):
    # Test that the frozen and non-frozen inverse Wishart gives the same
    # answers

    # Construct an arbitrary positive definite scale matrix
    dim = 4
    scale = np.diag(np.arange(dim)+1)
    scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
    scale = np.dot(scale.T, scale)

    # Construct a collection of positive definite matrices to test the PDF
    X = []
    for i in range(5):
        x = np.diag(np.arange(dim)+(i+1)**2)
        x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
        x = np.dot(x.T, x)
        X.append(x)
    X = np.array(X).T

    # Construct a 1D and 2D set of parameters
    parameters = [
        (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
        (10, scale, X)
    ]

    for (df, scale, x) in parameters:
        iw = invwishart(df, scale)
        assert_equal(iw.var(), invwishart.var(df, scale))
        assert_equal(iw.mean(), invwishart.mean(df, scale))
        assert_equal(iw.mode(), invwishart.mode(df, scale))
        assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
Author: wiredfool, Project: scipy, Lines: 31, Source file: test_multivariate.py
Example 5: amplitudes_to_cisdvec
def amplitudes_to_cisdvec(c0, c1, c2):
    nocc, nvir = c1.shape
    ooidx = numpy.tril_indices(nocc, -1)
    vvidx = numpy.tril_indices(nvir, -1)
    c2tril = lib.take_2d(c2.reshape(nocc**2, nvir**2),
                         ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
    return numpy.hstack((c0, c1.ravel(), c2tril.ravel()))
Author: chrinide, Project: pyscf, Lines: 7, Source file: gcisd.py
Example 6: test_tril_indices
def test_tril_indices():
    # indices without and with offset
    il1 = tril_indices(4)
    il2 = tril_indices(4, 2)

    a = np.array([[1, 2, 3, 4],
                  [5, 6, 7, 8],
                  [9, 10, 11, 12],
                  [13, 14, 15, 16]])

    # indexing:
    yield (assert_array_equal, a[il1],
           array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))

    # And for assigning values:
    a[il1] = -1
    yield (assert_array_equal, a,
           array([[-1,  2,  3,  4],
                  [-1, -1,  7,  8],
                  [-1, -1, -1, 12],
                  [-1, -1, -1, -1]]))

    # These cover almost the whole array (two diagonals right of the main one):
    a[il2] = -10
    yield (assert_array_equal, a,
           array([[-10, -10, -10,   4],
                  [-10, -10, -10, -10],
                  [-10, -10, -10, -10],
                  [-10, -10, -10, -10]]))
Author: Razin-Tailor, Project: ChatterBot, Lines: 29, Source file: test_twodim_base.py
Example 7: full_to_unique
def full_to_unique(y_full, feedmap, feedmask=None):

    if feedmask is None:
        feedmask = np.ones(feedmap.shape, dtype=np.bool)

    y_full[np.tril_indices(feedmap.shape[0])] = y_full[np.tril_indices(feedmap.shape[0])].conj()
    y_unique = y_full[np.where(feedmask)][np.unique(feedmap[np.where(feedmask)], return_index=True)[1]]

    return y_unique
Author: jrs65, Project: gibbs_calibration, Lines: 9, Source file: gibbs.py
Example 8: unique_to_full
def unique_to_full(y_unique, feedmap, feedmask=None):

    y_full = y_unique[feedmap]
    y_full[np.tril_indices(feedmap.shape[0])] = y_full[np.tril_indices(feedmap.shape[0])].conj()

    if feedmask is not None:
        y_full[np.where(np.logical_not(feedmask))] = 0.0

    return y_full
Author: jrs65, Project: gibbs_calibration, Lines: 9, Source file: gibbs.py
Example 9: test_equivalence
def test_equivalence(self):
    """
    The Equivalence covariance structure can represent an
    exchangeable covariance structure.  Here we check that the
    results are identical using the two approaches.
    """

    np.random.seed(3424)
    endog = np.random.normal(size=20)
    exog = np.random.normal(size=(20, 2))
    exog[:, 0] = 1
    groups = np.kron(np.arange(5), np.ones(4))
    groups[12:] = 3  # Create unequal size groups

    # Set up an Equivalence covariance structure to mimic an
    # Exchangeable covariance structure.
    pairs = {}
    start = [0, 4, 8, 12]
    for k in range(4):
        pairs[k] = {}

        # Diagonal values (variance parameters)
        if k < 3:
            pairs[k][0] = (start[k] + np.r_[0, 1, 2, 3],
                           start[k] + np.r_[0, 1, 2, 3])
        else:
            pairs[k][0] = (start[k] + np.r_[0, 1, 2, 3, 4, 5, 6, 7],
                           start[k] + np.r_[0, 1, 2, 3, 4, 5, 6, 7])

        # Off-diagonal pairs (covariance parameters)
        if k < 3:
            a, b = np.tril_indices(4, -1)
            pairs[k][1] = (start[k] + a, start[k] + b)
        else:
            a, b = np.tril_indices(8, -1)
            pairs[k][1] = (start[k] + a, start[k] + b)

    ex = sm.cov_struct.Exchangeable()
    model1 = sm.GEE(endog, exog, groups, cov_struct=ex)
    result1 = model1.fit()

    for return_cov in False, True:
        ec = sm.cov_struct.Equivalence(pairs, return_cov=return_cov)
        model2 = sm.GEE(endog, exog, groups, cov_struct=ec)
        result2 = model2.fit()

        # Use large atol/rtol for the correlation case since there
        # are some small differences in the results due to degree
        # of freedom differences.
        if return_cov == True:
            atol, rtol = 1e-6, 1e-6
        else:
            atol, rtol = 1e-3, 1e-3

        assert_allclose(result1.params, result2.params, atol=atol, rtol=rtol)
        assert_allclose(result1.bse, result2.bse, atol=atol, rtol=rtol)
        assert_allclose(result1.scale, result2.scale, atol=atol, rtol=rtol)
Author: Garlandal, Project: statsmodels, Lines: 57, Source file: test_gee.py
Example 10: tril_index_matrix
def tril_index_matrix(self):
    n = self.global_size
    num_tril_entries = self.num_tril_entries
    tril_index_matrix = np.zeros([n, n], dtype=int)
    tril_index_matrix[np.tril_indices(n)] = np.arange(num_tril_entries)
    tril_index_matrix[np.tril_indices(n)[::-1]] = np.arange(num_tril_entries)
    return tril_index_matrix
Author: aasensio, Project: pymc3, Lines: 9, Source file: approximations.py
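The property above maps every (i, j) position of a symmetric matrix to the index of the corresponding entry in a packed lower-triangular parameter vector. A stand-alone sketch with n = 3 chosen purely for illustration:

import numpy as np

n = 3
num_tril_entries = n * (n + 1) // 2

idx = np.zeros((n, n), dtype=int)
idx[np.tril_indices(n)] = np.arange(num_tril_entries)        # fill the lower triangle
idx[np.tril_indices(n)[::-1]] = np.arange(num_tril_entries)  # the reversed index pair fills the mirrored upper positions
print(idx)
# [[0 1 3]
#  [1 2 4]
#  [3 4 5]]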
Example 11: scrape_args
def scrape_args(self, records, scale=1, guide_tree=None, niters=10, keep_topology=False):
    # local lists
    distances = []
    variances = []
    headers = []
    for rec in records:
        distances.append(rec.parameters.partitions.distances)
        variances.append(rec.parameters.partitions.variances)
        headers.append(rec.get_names())

    num_matrices = len(records)
    label_set = reduce(lambda x, y: x.union(y), (set(l) for l in headers))
    labels_len = len(label_set)

    # labels string can be built straight away
    labels_string = '{0}\n{1}\n'.format(labels_len, ' '.join(label_set))

    # distvar and genome_map need to be built up
    distvar_list = [str(num_matrices)]
    genome_map_list = ['{0} {1}'.format(num_matrices, labels_len)]

    # build up lists to turn into strings
    for i in range(num_matrices):
        labels = headers[i]
        dim = len(labels)
        dmatrix = np.array(distances[i])
        vmatrix = np.array(variances[i])
        matrix = np.zeros(dmatrix.shape)
        matrix[np.triu_indices(len(dmatrix), 1)] = dmatrix[np.triu_indices(len(dmatrix), 1)]
        matrix[np.tril_indices(len(vmatrix), -1)] = vmatrix[np.tril_indices(len(vmatrix), -1)]
        if scale:
            matrix[np.triu_indices(dim, 1)] *= scale
            matrix[np.tril_indices(dim, -1)] *= scale * scale

        if isinstance(matrix, np.ndarray):
            matrix_string = '\n'.join([' '.join(str(x) for x in row)
                                       for row in matrix]) + '\n'
        else:
            matrix_string = matrix
        distvar_list.append('{0} {0} {1}\n{2}'.format(dim, i + 1,
                                                      matrix_string))
        genome_map_entry = ' '.join((str(labels.index(lab) + 1)
                                     if lab in labels else '-1')
                                    for lab in label_set)
        genome_map_list.append(genome_map_entry)

    distvar_string = '\n'.join(distvar_list)
    genome_map_string = '\n'.join(genome_map_list)
    if guide_tree is None:
        guide_tree = Tree.new_iterative_rtree(labels_len, names=label_set, rooted=True)
    tree_string = guide_tree.scale(scale).newick.replace('\'', '')
    return distvar_string, genome_map_string, labels_string, tree_string, niters, keep_topology
Author: kgori, Project: treeCl, Lines: 55, Source file: tasks.py
Example 12: test_cl_ldl
def test_cl_ldl(AA):
    """ Test the CL implementation of the LDL algorithm.

    This tests a series (cl_size) of matrices against the Python implementation.
    """
    # Convert to single float
    AA = AA.astype(DTYPE)

    # First calculate the Python based values for each matrix in AA
    py_ldl_D = np.empty((AA.shape[0], AA.shape[2]), dtype=AA.dtype)
    py_ldl_L = np.empty(AA.shape, dtype=AA.dtype)
    for i in range(AA.shape[2]):
        py_ldl_D[..., i], py_ldl_L[..., i] = ldl(AA[..., i])

    # Setup CL context
    import pyopencl as cl
    from pycllp.ldl import cl_krnl_ldl
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    # Result arrays
    m, n, cl_size = AA.shape
    L = np.empty(cl_size*m*(m+1)/2, dtype=DTYPE)
    D = np.empty(cl_size*m, dtype=DTYPE)

    mf = cl.mem_flags
    A_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=AA)

    # Create and compile kernel
    prg = cl_krnl_ldl(ctx)

    L_g = cl.Buffer(ctx, mf.READ_WRITE, L.nbytes)
    D_g = cl.Buffer(ctx, mf.READ_WRITE, D.nbytes)

    # Test normal LDL (unmodified)
    prg.ldl(queue, (cl_size,), None, np.int32(m), np.int32(n), A_g, L_g, D_g)

    cl.enqueue_copy(queue, L, L_g)
    cl.enqueue_copy(queue, D, D_g)

    # Compare each matrix decomposition with the python equivalent.
    for i in range(cl_size):
        np.testing.assert_allclose(py_ldl_D[..., i], D[i::cl_size], rtol=1e-6, atol=1e-7)
        np.testing.assert_allclose(py_ldl_L[..., i][np.tril_indices(m)], L[i::cl_size], rtol=1e-6, atol=1e-7)

    # Now test the modified algorithm ...
    beta = np.sqrt(np.amax(AA))
    prg.modified_ldl(queue, (cl_size,), None, np.int32(m), np.int32(n), A_g, L_g, D_g,
                     DTYPE(beta), DTYPE(1e-6))

    cl.enqueue_copy(queue, L, L_g)
    cl.enqueue_copy(queue, D, D_g)

    # Compare each matrix decomposition with the python equivalent.
    for i in range(cl_size):
        np.testing.assert_allclose(py_ldl_D[..., i], D[i::cl_size], rtol=1e-6, atol=1e-7)
        np.testing.assert_allclose(py_ldl_L[..., i][np.tril_indices(m)], L[i::cl_size], rtol=1e-6, atol=1e-7)
Author: jetuk, Project: pycllp, Lines: 54, Source file: test_ldl.py
Example 13: get_grad_tril
def get_grad_tril(mo_coeff_kpts, mo_occ_kpts, fock):
    if is_khf:
        grad_kpts = []
        for k, mo in enumerate(mo_coeff_kpts):
            f_mo = reduce(numpy.dot, (mo.T.conj(), fock[k], mo))
            nmo = f_mo.shape[0]
            grad_kpts.append(f_mo[numpy.tril_indices(nmo, -1)])
        return numpy.hstack(grad_kpts)
    else:
        f_mo = reduce(numpy.dot, (mo_coeff_kpts.T.conj(), fock, mo_coeff_kpts))
        nmo = f_mo.shape[0]
        return f_mo[numpy.tril_indices(nmo, -1)]
Author: chrinide, Project: pyscf, Lines: 12, Source file: addons.py
Example 14: impute_missing_bins
def impute_missing_bins(hic_matrix, regions=None, per_chromosome=True, stat=np.ma.mean):
    """
    Impute missing contacts in a Hi-C matrix.

    For inter-chromosomal data uses the mean of all inter-chromosomal contacts,
    for intra-chromosomal data uses the mean of intra-chromosomal counts at the corresponding diagonal.

    :param hic_matrix: A square numpy array
    :param regions: A list of :class:`~GenomicRegion`s - if omitted, will create a dummy list
    :param per_chromosome: Do imputation on a per-chromosome basis (recommended)
    :param stat: The aggregation statistic to be used for imputation, defaults to the mean.
    """
    if regions is None:
        regions = []
        for i in range(hic_matrix.shape[0]):
            regions.append(GenomicRegion(chromosome='', start=i, end=i))

    chr_bins = dict()
    for i, region in enumerate(regions):
        if region.chromosome not in chr_bins:
            chr_bins[region.chromosome] = [i, i]
        else:
            chr_bins[region.chromosome][1] = i

    n = len(regions)
    if not hasattr(hic_matrix, "mask"):
        hic_matrix = masked_matrix(hic_matrix)

    imputed = hic_matrix.copy()
    if per_chromosome:
        for c_start, c_end in chr_bins.itervalues():
            # Correcting intra-chromosomal contacts by mean contact count at each diagonal
            for i in range(c_end - c_start):
                ind = kth_diag_indices(c_end - c_start, -i)
                diag = imputed[c_start:c_end, c_start:c_end][ind]
                diag[diag.mask] = stat(diag)
                imputed[c_start:c_end, c_start:c_end][ind] = diag
            # Correcting inter-chromosomal contacts by mean of all contact counts between
            # each pair of chromosomes
            for other_start, other_end in chr_bins.itervalues():
                # Only correct upper triangle
                if other_start <= c_start:
                    continue
                inter = imputed[c_start:c_end, other_start:other_end]
                inter[inter.mask] = stat(inter)
                imputed[c_start:c_end, other_start:other_end] = inter
    else:
        for i in range(n):
            diag = imputed[kth_diag_indices(n, -i)]
            diag[diag.mask] = stat(diag)
            imputed[kth_diag_indices(n, -i)] = diag
    # Copying upper triangle to lower triangle
    imputed[np.tril_indices(n)] = imputed.T[np.tril_indices(n)]
    return imputed
Author: vaquerizaslab, Project: tadtool, Lines: 53, Source file: tad.py
Example 15: structure_function
def structure_function(self, bins):
    """
    compute the structure function of the light curve at given time lags
    """
    dt = np.subtract.outer(self.t, self.t)[np.tril_indices(self.t.shape[0], k=-1)]
    dm = np.subtract.outer(self.y, self.y)[np.tril_indices(self.y.shape[0], k=-1)]
    sqrsum, bins, _ = binned_statistic(dt, dm**2, bins=bins, statistic='sum')
    n, _, _ = binned_statistic(dt, dm**2, bins=bins, statistic='count')
    SF = np.sqrt(sqrsum/n)
    lags = 0.5*(bins[1:] + bins[:-1])
    return lags, SF
Author: mattjhill, Project: drwfast, Lines: 12, Source file: lightcurve.py
Example 16: multivar_norm_cdf
def multivar_norm_cdf(upper, cov_matrix):
    """CDF of multivariate Gaussian centered at 0 with covariance matrix cov_matrix. CDF is taken from -inf to u."""
    if upper.size == 1:
        return scipy.stats.norm.cdf(upper[0], 0, numpy.sqrt(cov_matrix[0, 0]))

    # Standardize the upper bound u using the standard deviation
    std = numpy.sqrt(numpy.diag(cov_matrix))
    std_upper = upper / std

    # Convert covariance matrix into correlation matrix: http://en.wikipedia.org/wiki/Correlation_and_dependence#Correlation_matrices
    corr_matrix = cov_matrix / std / std.reshape(upper.size, 1)  # standardize -> correlation matrix

    # Indices for traversing the strict lower triangular elements of corr_matrix in column major, as required by the fortran mvndst function.
    strict_lower_diag_indices = numpy.tril_indices(upper.size, -1)

    # Call into the scipy wrapper for the fortran method "mvndst"
    # Link: http://www.math.wsu.edu/faculty/genz/software/fort77/mvtdstpack.f
    out = scipy.stats.kde.mvn.mvndst(
        numpy.zeros(upper.size, dtype=int),  # The lower bound of integration. We initialize with 0 because it is ignored (because of the third argument).
        std_upper,  # The upper bound of integration
        numpy.zeros(upper.size, dtype=int),  # For each dim, 0 means -inf for lower bound
        corr_matrix[strict_lower_diag_indices],  # The vector of strict lower triangular correlation coefficients
        maxpts=self._mvndst_parameters.maxpts_per_dim * upper.size,  # Maximum number of iterations for the mvndst function
        releps=self._mvndst_parameters.releps,  # The error allowed relative to actual value
        abseps=self._mvndst_parameters.abseps,  # The absolute error allowed
    )
    return out[1]  # Index 1 corresponds to the actual value. 0 has the error, and 2 is a flag denoting whether releps was reached
Author: Allensmile, Project: MOE, Lines: 27, Source file: expected_improvement.py
Example 17: threePointsToStandard
def threePointsToStandard(e, p, q, r):
    """Return a projective transformation that maps three points on a conic to
    the conic xy + yz + xz = 0.

    Keyword arguments:
    e -- a projective conic
    p -- the first point on e
    q -- the second point on e
    r -- the third point on e
    """
    coeffs = e
    p, q, r = np.matrix(p), np.matrix(q), np.matrix(r)

    # Determine a matrix A associated with a projective transformation that
    # maps P, Q, and R onto [1, 0, 0], [0, 1, 0], and [0, 0, 1], respectively.
    A = np.linalg.inv(np.vstack([p, q, r]))

    # Determine the equation bx'y' + fx'z' + gy'z' = 0 of t(E), for some real
    # numbers b, f, and g.
    M = sum([coeff * u.T * v
             for coeff, (u, v)
             in zip(coeffs, combinations_with_replacement((p, q, r), 2))])

    # Get B from M by adding like terms to find b, f, and g and then
    # constructing a diagonal matrix from the flat [1/g, 1/f, 1/b].
    B = np.diagflat([1 / (u + v)
                     for u, v
                     in reversed(zip(np.array(M)[np.triu_indices(3, 1)],
                                     np.array(M)[np.tril_indices(3, -1)]))])

    return B * A
Author: PatrickFant, Project: PyJective, Lines: 31, Source file: conic.py
Example 18: _getSignificantData
def _getSignificantData(self, sig_lvl):
    '''Find which edges are significant at the passed level and set self properties.
    '''
    rows, cols = self.data.shape  # rows = cols
    mask = zeros((rows, cols))
    mask[tril_indices(rows, 0)] = 1  # preparing mask
    cvals = unique(self.data[triu_indices(rows, 1)])  # cvals is sorted
    # calculate upper bound, i.e. what value in the distribution of values
    # has sig_lvl fraction of the data higher than or equal to it. this is
    # not guaranteed to be precise because of repeated values. for instance
    # assume the distribution of dissimilarity values is:
    # [.1, .2, .2, .2, .2, .3, .4, .5, .6, .6, .6, .6, .6, .6, .7]
    # and you want sig_lvl=.2, i.e. you get 20 percent of the linkages as
    # significant. this would result in choosing the score .6 since its the
    # third in the ordered list (of 15 elements, 3/15=.2). but, since there
    # is no a-priori way to tell which of the multiple .6 linkages are
    # significant, we select all of them, forcing our lower bound to
    # encompass 7/15ths of the data. the round call on the ub is to avoid
    # documented numpy weirdness where it will misassign >= calls for long
    # floats.
    ub = round(cvals[-round(sig_lvl*len(cvals))], 7)
    mdata = ma(self.data, mask)
    self.actual_sig_lvl = \
        (mdata >= ub).sum()/float(mdata.shape[0]*(mdata.shape[0]-1)/2)
    self.sig_edges = where(mdata >= ub, 1, 0).nonzero()
    self.otu1 = [self.otu_ids[i] for i in self.sig_edges[0]]
    self.otu2 = [self.otu_ids[i] for i in self.sig_edges[1]]
    self.sig_otus = list(set(self.otu1+self.otu2))
    self.edges = zip(self.otu1, self.otu2)
    self.cvals = mdata[self.sig_edges[0], self.sig_edges[1]]
Author: adamrp, Project: correlations, Lines: 30, Source file: parse.py
Example 19: symmetrize
def symmetrize(m, use_triangle='lower'):
    """Symmetrize a square NumPy array by reflecting one triangular
    section across the diagonal to the other.
    """

    if use_triangle not in ('lower', 'upper'):
        raise ValueError
    if not len(m.shape) == 2:
        raise ValueError
    if not (m.shape[0] == m.shape[1]):
        raise ValueError

    dim = m.shape[0]

    lower_indices = numpy.tril_indices(dim, k=-1)
    upper_indices = numpy.triu_indices(dim, k=1)

    ms = m.copy()

    if use_triangle == 'lower':
        ms[upper_indices] = ms[lower_indices]
    if use_triangle == 'upper':
        ms[lower_indices] = ms[upper_indices]

    return ms
Author: solccp, Project: cclib, Lines: 25, Source file: utils.py
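A quick usage sketch for the symmetrize helper above; the input matrix is made up, and the call assumes the function defined above is in scope:

import numpy

m = numpy.array([[1.0, 9.0, 9.0],
                 [2.0, 5.0, 9.0],
                 [3.0, 4.0, 6.0]])
print(symmetrize(m))        # lower triangle reflected upward
# [[1. 2. 3.]
#  [2. 5. 4.]
#  [3. 4. 6.]]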
Example 20: initialize
def initialize(self, model):

    super(GlobalOddsRatio, self).initialize(model)

    if self.model.weights is not None:
        warnings.warn("weights not implemented for GlobalOddsRatio "
                      "cov_struct, using unweighted covariance estimate",
                      NotImplementedWarning)

    # Need to restrict to between-subject pairs
    cpp = []
    for v in model.endog_li:

        # Number of subjects in this group
        m = int(len(v) / self._ncut)
        i1, i2 = np.tril_indices(m, -1)

        cpp1 = {}
        for k1 in range(self._ncut):
            for k2 in range(k1 + 1):
                jj = np.zeros((len(i1), 2), dtype=np.int64)
                jj[:, 0] = i1 * self._ncut + k1
                jj[:, 1] = i2 * self._ncut + k2
                cpp1[(k2, k1)] = jj

        cpp.append(cpp1)

    self.cpp = cpp

    # Initialize the dependence parameters
    self.crude_or = self.observed_crude_oddsratio()
    if self.model.update_dep:
        self.dep_params = self.crude_or
Author: Bonfils-ebu, Project: statsmodels, Lines: 33, Source file: cov_struct.py
Note: The numpy.tril_indices examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not republish without permission.