This article collects typical usage examples of Python's numpy.tril_indices_from function. If you are wondering what tril_indices_from does, how to call it, or what it looks like in real code, the curated examples below may help.
The article presents 20 code examples of tril_indices_from, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the recommendation system surface better Python code samples.
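Before the project-specific examples, here is a minimal sketch (not taken from any of the projects below; the array values are purely illustrative) of what np.tril_indices_from itself returns:

import numpy as np

a = np.arange(16).reshape(4, 4)
rows, cols = np.tril_indices_from(a)             # lower triangle, diagonal included
lower_values = a[rows, cols]                     # 1-D array of the lower-triangular entries
rows_s, cols_s = np.tril_indices_from(a, k=-1)   # k=-1 excludes the diagonal
a[rows_s, cols_s] = 0                            # zero everything strictly below the diagonal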
Example 1: test_pairplot_reg

def test_pairplot_reg(self):

    vars = ["x", "y", "z"]
    g = ag.pairplot(self.df, diag_kind="hist", kind="reg")

    for ax in g.diag_axes:
        nt.assert_equal(len(ax.patches), 10)

    for i, j in zip(*np.triu_indices_from(g.axes, 1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

        nt.assert_equal(len(ax.lines), 1)
        nt.assert_equal(len(ax.collections), 2)

    for i, j in zip(*np.tril_indices_from(g.axes, -1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

        nt.assert_equal(len(ax.lines), 1)
        nt.assert_equal(len(ax.collections), 2)

    for i, j in zip(*np.diag_indices_from(g.axes)):
        ax = g.axes[i, j]
        nt.assert_equal(len(ax.collections), 0)

Developer: mwaskom, Project: seaborn, Lines: 33, Source: test_axisgrid.py
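The index pairs these loops iterate over are easy to inspect directly; a small illustrative sketch (the 3x3 shape merely stands in for g.axes and is not taken from the test):

import numpy as np

axes = np.empty((3, 3), dtype=object)                 # stand-in for the PairGrid axes array
upper = list(zip(*np.triu_indices_from(axes, 1)))     # [(0, 1), (0, 2), (1, 2)] -> panels above the diagonal
lower = list(zip(*np.tril_indices_from(axes, -1)))    # [(1, 0), (2, 0), (2, 1)] -> panels below the diagonal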
Example 2: test_pairplot

def test_pairplot(self):

    vars = ["x", "y", "z"]
    g = ag.pairplot(self.df)

    for ax in g.diag_axes:
        assert len(ax.patches) > 1

    for i, j in zip(*np.triu_indices_from(g.axes, 1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

    for i, j in zip(*np.tril_indices_from(g.axes, -1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

    for i, j in zip(*np.diag_indices_from(g.axes)):
        ax = g.axes[i, j]
        nt.assert_equal(len(ax.collections), 0)

    g = ag.pairplot(self.df, hue="a")
    n = len(self.df.a.unique())

    for ax in g.diag_axes:
        assert len(ax.lines) == n
        assert len(ax.collections) == n

Developer: mwaskom, Project: seaborn, Lines: 34, Source: test_axisgrid.py
Example 3: transform_covars_grad

def transform_covars_grad(self, internal_grad):
    grad = np.empty((self.num_latent, self.get_covar_size()), dtype=np.float32)
    for j in range(self.num_latent):
        tmp = self._theano_transform_covars_grad(internal_grad[0, j], self.covars_cholesky[j])
        tmp[np.diag_indices_from(tmp)] *= self.covars_cholesky[j][np.diag_indices_from(tmp)]
        grad[j] = tmp[np.tril_indices_from(self.covars_cholesky[j])]
    return grad.flatten()

Developer: Karl-Krauth, Project: Sparse-GP, Lines: 7, Source: full_gaussian_mixture.py
Example 4: test_map_diag_and_offdiag

def test_map_diag_and_offdiag(self):

    vars = ["x", "y", "z"]
    g = ag.PairGrid(self.df)
    g.map_offdiag(plt.scatter)
    g.map_diag(plt.hist)

    for ax in g.diag_axes:
        nt.assert_equal(len(ax.patches), 10)

    for i, j in zip(*np.triu_indices_from(g.axes, 1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

    for i, j in zip(*np.tril_indices_from(g.axes, -1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

    for i, j in zip(*np.diag_indices_from(g.axes)):
        ax = g.axes[i, j]
        nt.assert_equal(len(ax.collections), 0)

Developer: GeorgeMcIntire, Project: seaborn, Lines: 29, Source: test_axisgrid.py
Example 5: _get_raw_covars

def _get_raw_covars(self):
    flattened_covars = np.empty([self.num_latent, self.get_covar_size()], dtype=np.float32)
    for i in xrange(self.num_latent):
        raw_covars = self.covars_cholesky[i].copy()
        raw_covars[np.diag_indices_from(raw_covars)] = np.log(raw_covars[np.diag_indices_from(raw_covars)])
        flattened_covars[i] = raw_covars[np.tril_indices_from(raw_covars)]
    return flattened_covars.flatten()

Developer: Karl-Krauth, Project: Sparse-GP, Lines: 7, Source: full_gaussian_mixture.py
Example 6: map_lower

def map_lower(self, func, **kwargs):
    """Plot with a bivariate function on the lower diagonal subplots.

    Parameters
    ----------
    func : callable plotting function
        Must take x, y arrays as positional arguments and draw onto the
        "currently active" matplotlib Axes.

    """
    kw_color = kwargs.pop("color", None)
    for i, j in zip(*np.tril_indices_from(self.axes, -1)):
        hue_grouped = self.data.groupby(self.hue_vals)
        for k, (label_k, data_k) in enumerate(hue_grouped):
            ax = self.axes[i, j]
            plt.sca(ax)

            x_var = self.x_vars[j]
            y_var = self.y_vars[i]

            color = self.palette[k] if kw_color is None else kw_color
            func(data_k[x_var], data_k[y_var], label=label_k,
                 color=color, **kwargs)

        self._clean_axis(ax)
        self._update_legend_data(ax)

    if kw_color is not None:
        kwargs["color"] = kw_color
    self._add_axis_labels()

Developer: andreas-h, Project: seaborn, Lines: 31, Source: axisgrid.py
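For context, a typical call through seaborn's public PairGrid API, which is how map_lower is normally reached (a usage sketch, not part of the source above; it assumes a seaborn install with the bundled iris dataset):

import matplotlib.pyplot as plt
import seaborn as sns

df = sns.load_dataset("iris")
g = sns.PairGrid(df, hue="species")
g.map_lower(plt.scatter)     # bivariate plots on the panels below the diagonal only
g.map_diag(plt.hist)
g.add_legend()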
Example 7: test_pairplot

def test_pairplot(self):

    vars = ["x", "y", "z"]
    g = pairplot(self.df)

    for ax in g.diag_axes:
        nt.assert_equal(len(ax.patches), 10)

    for i, j in zip(*np.triu_indices_from(g.axes, 1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

    for i, j in zip(*np.tril_indices_from(g.axes, -1)):
        ax = g.axes[i, j]
        x_in = self.df[vars[j]]
        y_in = self.df[vars[i]]
        x_out, y_out = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x_in, x_out)
        npt.assert_array_equal(y_in, y_out)

    for i, j in zip(*np.diag_indices_from(g.axes)):
        ax = g.axes[i, j]
        nt.assert_equal(len(ax.collections), 0)

    plt.close("all")

Developer: c-wilson, Project: seaborn, Lines: 29, Source: test_axisgrid.py
Example 8: net_sample_multinomial

def net_sample_multinomial(A, minEdges, edgesPerSample=1, *args, **kwargs):
    """NETWORK SAMPLING ALGORITHM:
    sample network ties from a multinomial distribution
    defined as 1/AAT[i,j] normalized by sum(AAT[i>j]).
    Problem: it doesn't sufficiently cluster the resulting network and
    doesn't return an exact number of ties, only at least as many as
    the specified minEdges.
    """
    draws = int(np.ceil(minEdges*1.2))

    # pairwise distances between observations
    dist = pdist(A)  # what matrix to use: pdist(A) or just tril(AAT) directly?
    invdist = dist
    invdist[invdist != 0] = 1/invdist[invdist != 0]  # prevent division by 0
    thetavec = invdist / np.sum(invdist)
    theta = squareform(thetavec)

    # multinomial sample
    n = np.shape(theta)[0]
    Z = np.zeros((n, n))
    # samp = sampleLinks(q=thetavec, edgesToDraw=1, draws=draws)
    y = np.random.multinomial(edgesPerSample, thetavec, draws)
    samp = np.asarray([np.mean([y[draw][item] for draw in np.arange(draws)]) for item in np.arange(len(thetavec))])
    samp = np.ceil(samp)

    # repeat until reaching enough network ties
    while np.sum(samp) < minEdges:
        draws = int(np.ceil(draws * 1.1))  # increase number of draws and try again
        # samp = sampleLinks(q=thetavec, edgesToDraw=1, draws=draws)
        y = np.random.multinomial(edgesPerSample, thetavec, draws)
        samp = np.asarray([np.mean([y[draw][item] for draw in np.arange(draws)]) for item in np.arange(len(thetavec))])
        samp = np.ceil(samp)

    Z[np.tril_indices_from(Z, k=-1)] = samp
    return (theta, Z)

Developer: sdownin, Project: netCreate, Lines: 35, Source: netcreate_previous_version.py
Example 9: set_params

def set_params(self, values):
    self.lengthscales = values[:-1]
    self.variance = values[-1]
    L = np.zeros((self.num_dim, self.num_dim))
    L[np.tril_indices_from(L)] = self.lengthscales
    self.L_inv = inv(L)
    self.projection = np.dot(self.L_inv.T, self.L_inv)

Developer: jgosmann, Project: plume, Lines: 7, Source: prediction.py
Example 10: __init__

def __init__(self, lengthscale_mat, variance=1.0):
    lengthscale_mat = np.asarray(lengthscale_mat)
    assert lengthscale_mat.shape[0] == lengthscale_mat.shape[1]
    self.num_dim = lengthscale_mat.shape[0]
    self.params = np.concatenate((
        lengthscale_mat[np.tril_indices_from(lengthscale_mat)],
        np.array([variance])))

Developer: jgosmann, Project: plume, Lines: 7, Source: prediction.py
Example 11: shepard

def shepard(self, xax=1, yax=2):
    coords = self.U[:, [xax-1, yax-1]]
    reducedD = np.zeros((coords.shape[0], coords.shape[0]))
    for i in xrange(coords.shape[0]):
        for j in xrange(coords.shape[0]):
            d = coords[i, :] - coords[j, :]
            reducedD[i, j] = np.sqrt(d.dot(d))
    reducedD = reducedD[np.tril_indices_from(reducedD, k=-1)]
    originalD = self.y2[np.tril_indices_from(self.y2, k=-1)]
    xmin = np.min(reducedD)
    xmax = np.max(reducedD)
    f, ax = py.subplots()
    ax.plot(reducedD, originalD, 'ko')
    ax.plot([xmin, xmax], [xmin, xmax], 'r--')
    ax.set_xlabel('Distances in Reduced Space')
    ax.set_ylabel('Distances in Original Matrix')
    py.show()

Developer: grovduck, Project: ecopy, Lines: 17, Source: pcoa.py
Example 12: set_covars

def set_covars(self, raw_covars):
    raw_covars = raw_covars.reshape([self.num_latent, self.get_covar_size()])
    for j in xrange(self.num_latent):
        cholesky = np.zeros([self.num_dim, self.num_dim], dtype=np.float32)
        cholesky[np.tril_indices_from(cholesky)] = raw_covars[j]
        cholesky[np.diag_indices_from(cholesky)] = np.exp(cholesky[np.diag_indices_from(cholesky)])
        self.covars_cholesky[j] = cholesky
        self.covars[j] = mdot(self.covars_cholesky[j], self.covars_cholesky[j].T)

Developer: Karl-Krauth, Project: Sparse-GP, Lines: 8, Source: full_gaussian_mixture.py
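Examples 5 and 12 are two halves of the same round trip: flatten a Cholesky factor (log-transforming its diagonal so it stays unconstrained), then rebuild it. A self-contained sketch of that round trip, using an illustrative covariance matrix that is not taken from the project:

import numpy as np

cov = np.array([[4.0, 1.0, 0.5],
                [1.0, 3.0, 0.2],
                [0.5, 0.2, 2.0]])
chol = np.linalg.cholesky(cov)                         # lower-triangular factor

# Flatten as in Example 5: log the diagonal, then keep only the lower triangle.
raw = chol.copy()
raw[np.diag_indices_from(raw)] = np.log(raw[np.diag_indices_from(raw)])
flat = raw[np.tril_indices_from(raw)]

# Unflatten as in Example 12: refill the triangle, exponentiate the diagonal back.
rebuilt = np.zeros_like(cov)
rebuilt[np.tril_indices_from(rebuilt)] = flat
rebuilt[np.diag_indices_from(rebuilt)] = np.exp(rebuilt[np.diag_indices_from(rebuilt)])

assert np.allclose(rebuilt, chol)
assert np.allclose(rebuilt @ rebuilt.T, cov)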
Example 13: find_smallest_index

def find_smallest_index(matrice):
    """Return the (i, j) index of the smallest entry in a matrix.

    A tuple (i, j) is returned.
    Warning: the diagonal should hold the largest numbers, so it will never be chosen.
    """
    index = np.tril_indices_from(matrice, -1)
    return np.vstack(index)[:, matrice[index].argmin()]

Developer: UdeM-LBIT, Project: profileNJ, Lines: 8, Source: ClusterUtils.py
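A quick usage sketch for the function above, assuming it is in scope; the matrix values are made up for illustration:

import numpy as np

# Distance-like matrix; the diagonal holds the largest values so it is never picked,
# exactly as the docstring warns.
d = np.array([[9.0, 5.0, 2.0],
              [5.0, 9.0, 7.0],
              [2.0, 7.0, 9.0]])
idx = find_smallest_index(d)   # array([2, 0]): d[2, 0] == 2.0 is the smallest off-diagonal entry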
Example 14: _band_infinite

def _band_infinite():
    '''Suppress the diagonal +- width of a distance matrix'''
    band = np.empty((t, t))
    band[:] = np.inf
    band[np.triu_indices_from(band, width)] = 0
    band[np.tril_indices_from(band, -width)] = 0
    return band

Developer: BWalburn, Project: librosa, Lines: 8, Source: segment.py
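In the snippet above, t and width are closure variables from the enclosing function. A hypothetical standalone rewrite that makes them explicit (this is only an illustration of what the mask contains, not librosa's actual API):

import numpy as np

def band_infinite(t, width):
    band = np.empty((t, t))
    band[:] = np.inf
    band[np.triu_indices_from(band, width)] = 0    # zero at and above the +width diagonal
    band[np.tril_indices_from(band, -width)] = 0   # zero at and below the -width diagonal
    return band                                    # np.inf remains only where |i - j| < width

mask = band_infinite(5, 2)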
Example 15: from_vector

def from_vector(x):
    # Solution to the equation len(x) = n * (n + 1) / 2
    n = int((math.sqrt(len(x) * 8 + 1) - 1) / 2)
    result = np.zeros((n, n))
    result[np.tril_indices_from(result, -1)] = x[n:]
    result += result.transpose()
    result[np.diag_indices_from(result)] = x[:n]
    return result

Developer: filmor, Project: python-ma, Lines: 8, Source: noise_filter.py
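A usage sketch of the packing convention this function expects (diagonal first, then the strict lower triangle in np.tril_indices order), assuming from_vector above is in scope together with its math and numpy imports:

import numpy as np

S = np.array([[4.0, 1.0, 2.0],
              [1.0, 5.0, 3.0],
              [2.0, 3.0, 6.0]])
packed = np.concatenate([np.diag(S), S[np.tril_indices_from(S, -1)]])
restored = from_vector(packed)
assert np.array_equal(restored, S)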
Example 16: plot_pairwise_scatter

def plot_pairwise_scatter(self, i, threshold=0.95):
    '''plot pairwise scatter plot of data points, with contours as
    background

    Parameters
    ----------
    i : int
    threshold : float

    Returns
    -------
    Figure instance

    The lower triangle background is a binary contour based on the
    specified threshold. All axes not shown are set to a default value
    in the middle of their range.

    The upper triangle shows a contour map with the conditional
    probability, again setting all non-shown dimensions to a default value
    in the middle of their range.

    '''
    model = self.models[i]
    columns = model.params.index.values.tolist()
    columns.remove('Intercept')
    x = self._normalized[columns]
    data = x.copy()

    # TODO:: have option to change
    # diag to CDF, gives you effectively the
    # regional sensitivity analysis results
    data['y'] = self.y  # for testing
    grid = sns.PairGrid(data=data, hue='y', vars=columns)
    grid.map_lower(plt.scatter, s=5)
    grid.map_diag(sns.kdeplot, shade=True)
    grid.add_legend()

    contour_levels = np.arange(0, 1.05, 0.05)
    for i, j in zip(*np.triu_indices_from(grid.axes, 1)):
        ax = grid.axes[i, j]
        ylabel = columns[i]
        xlabel = columns[j]
        contours(ax, model, xlabel, ylabel, contour_levels)

    levels = [0, threshold, 1]
    for i, j in zip(*np.tril_indices_from(grid.axes, -1)):
        ax = grid.axes[i, j]
        ylabel = columns[i]
        xlabel = columns[j]
        contours(ax, model, xlabel, ylabel, levels)

    fig = plt.gcf()
    return fig

Developer: quaquel, Project: EMAworkbench, Lines: 57, Source: logistic_regression.py
Example 17: WishartBartlett

def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False):
    """
    Bartlett decomposition of the Wishart distribution. As the Wishart
    distribution requires the matrix to be symmetric positive semi-definite,
    it is impossible for MCMC to ever propose acceptable matrices.

    Instead, we can use the Bartlett decomposition, which samples a lower
    diagonal matrix. Specifically:

    If L ~ [[sqrt(c_1), 0, ...],
            [z_21, sqrt(c_2), 0, ...],
            [z_31, z_32, sqrt(c_3), ...]]
    with c_i ~ Chi²(n-i+1) and z_ij ~ N(0, 1), then
    L * A * A.T * L.T ~ Wishart(L * L.T, nu)

    See http://en.wikipedia.org/wiki/Wishart_distribution#Bartlett_decomposition
    for more information.

    :Parameters:
      S : ndarray
        p x p positive definite matrix
        Or:
        p x p lower-triangular matrix that is the Cholesky factor
        of the covariance matrix.
      nu : int
        Degrees of freedom, > dim(S).
      is_cholesky : bool (default=False)
        Input matrix S is already Cholesky decomposed as S.T * S
      return_cholesky : bool (default=False)
        Only return the Cholesky decomposed matrix.

    :Note:
      This is not a standard Distribution class but follows a similar
      interface. Besides the Wishart distribution, it will add RVs
      c and z to your model which make up the matrix.
    """
    L = S if is_cholesky else scipy.linalg.cholesky(S)
    diag_idx = np.diag_indices_from(S)
    tril_idx = np.tril_indices_from(S, k=-1)
    n_diag = len(diag_idx[0])
    n_tril = len(tril_idx[0])
    c = tt.sqrt(ChiSquared('c', nu - np.arange(2, 2+n_diag), shape=n_diag))
    print('Added new variable c to model diagonal of Wishart.')
    z = Normal('z', 0, 1, shape=n_tril)
    print('Added new variable z to model off-diagonals of Wishart.')

    # Construct A matrix
    A = tt.zeros(S.shape, dtype=np.float32)
    A = tt.set_subtensor(A[diag_idx], c)
    A = tt.set_subtensor(A[tril_idx], z)

    # L * A * A.T * L.T ~ Wishart(L*L.T, nu)
    if return_cholesky:
        return Deterministic(name, tt.dot(L, A))
    else:
        return Deterministic(name, tt.dot(tt.dot(tt.dot(L, A), A.T), L.T))

Developer: 2php, Project: pymc3, Lines: 57, Source: multivariate.py
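A minimal usage sketch, assuming a pymc3 3.x install where this helper is exposed as pm.WishartBartlett (the scale matrix and degrees of freedom are illustrative):

import numpy as np
import pymc3 as pm

S = np.eye(3)   # prior scale matrix
with pm.Model():
    cov = pm.WishartBartlett('cov', S, nu=5)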
Example 18: full_corrs

def full_corrs(data):
    """Same- and cross-team correlations.

    Same-team correlations are above the diagonal;
    cross-team correlations are on and below the diagonal.
    """
    corr = same_team_corrs(data)
    tril_ixs = np.tril_indices_from(corr)
    corr.values[tril_ixs] = cross_team_corrs(data).values[tril_ixs]
    return corr

Developer: hsharrison, Project: nfldata, Lines: 10, Source: stats.py
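The same triangle-splicing pattern in isolation, with made-up stand-ins for the two correlation tables (a sketch, not the project's code):

import numpy as np

same = np.ones((3, 3))            # stands in for same_team_corrs(data)
cross = np.full((3, 3), -1.0)     # stands in for cross_team_corrs(data)
combined = same.copy()
tril_ixs = np.tril_indices_from(combined)
combined[tril_ixs] = cross[tril_ixs]   # on and below the diagonal from `cross`, above from `same`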
Example 19: _update

def _update(self):
    self.parameters = self.get_parameters()
    for k in range(self.num_comp):
        for j in range(self.num_process):
            temp = np.zeros((self.num_dim, self.num_dim))
            temp[np.tril_indices_from(temp)] = self.L_flatten[k, j, :].copy()
            temp[np.diag_indices_from(temp)] = np.exp(temp[np.diag_indices_from(temp)])
            # temp[np.diag_indices_from(temp)] = temp[np.diag_indices_from(temp)] ** 2
            self.L[k, j, :, :] = temp
            self.s[k, j] = mdot(self.L[k, j, :, :], self.L[k, j, :, :].T)

Developer: jfutoma, Project: savigp, Lines: 10, Source: mog_single_comp.py
Example 20: transform_eye_grad

def transform_eye_grad(self):
    """
    In the case of a posterior distribution with one component, the gradient of the
    entropy term w.r.t. the posterior covariance is the identity. This function returns the
    flattened lower-triangular terms of the identity matrices for all processes.
    """
    grad = np.empty((self.num_comp, self.num_process, self.get_sjk_size()))
    meye = np.eye(self.num_dim)[np.tril_indices_from(self.L[0, 0])]
    for k in range(self.num_comp):
        for j in range(self.num_process):
            grad[k, j] = meye
    return grad.flatten()

Developer: jfutoma, Project: savigp, Lines: 12, Source: mog_single_comp.py
Note: The numpy.tril_indices_from examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub/MSDocs and other platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code. Do not repost without permission.