This article collects typical usage examples of the Python function pymc3.summary. If you are wondering how exactly to use pm.summary, the curated code examples below may help.
The following shows 20 code examples of the summary function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
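Before diving into the examples, here is a minimal, self-contained sketch of the typical pm.summary workflow (the model and data below are illustrative, not taken from any of the examples that follow):

import numpy as np
import pymc3 as pm

# Toy model: infer the mean of 100 noisy observations.
with pm.Model():
    mu = pm.Normal('mu', mu=0., sd=10.)   # prior on the mean
    pm.Normal('obs', mu=mu, sd=1., observed=np.random.randn(100))
    trace = pm.sample(1000)               # draw posterior samples

# One row per variable: mean, sd, mc_error, HPD bounds, n_eff, Rhat.
# Depending on the PyMC3 version, summary() prints the table or
# returns a pandas DataFrame.
print(pm.summary(trace))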
Example 1: run
def run(n=5000):
    with model_1:
        xstart = pm.find_MAP()
        xstep = pm.Slice()
        trace = pm.sample(n, xstep, xstart, random_seed=123, progressbar=True)
        pm.summary(trace)
Developer: 21hub, Project: pymc3, Lines: 7, Source: lightspeed_example.py
Example 2: test_value_n_eff_rhat
def test_value_n_eff_rhat(self):
    mu = -2.1
    tau = 1.3
    with Model():
        Normal('x0', mu, tau, testval=floatX_array(.1))  # 0d
        Normal('x1', mu, tau, shape=2, testval=floatX_array([.1, .1]))  # 1d
        Normal('x2', mu, tau, shape=(2, 2),
               testval=floatX_array(np.tile(.1, (2, 2))))  # 2d
        Normal('x3', mu, tau, shape=(2, 2, 3),
               testval=floatX_array(np.tile(.1, (2, 2, 3))))  # 3d
        trace = pm.sample(100, step=pm.Metropolis())
    for varname in trace.varnames:
        # test effective_n value
        n_eff = pm.effective_n(trace, varnames=[varname])[varname]
        n_eff_df = np.asarray(
            pm.summary(trace, varnames=[varname])['n_eff']
        ).reshape(n_eff.shape)
        npt.assert_equal(n_eff, n_eff_df)
        # test Rhat value
        rhat = pm.gelman_rubin(trace, varnames=[varname])[varname]
        rhat_df = np.asarray(
            pm.summary(trace, varnames=[varname])['Rhat']
        ).reshape(rhat.shape)
        npt.assert_equal(rhat, rhat_df)
Developer: alexander-belikov, Project: pymc3, Lines: 25, Source: test_stats.py
Example 3: test_summary_1d_variable_model
def test_summary_1d_variable_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, shape=2, testval=[.1, .1])
        step = Metropolis(model.vars, np.diag([1.]), blocked=True)
        trace = pm.sample(100, step=step)
    pm.summary(trace)
Developer: jameshensman, Project: pymc3, Lines: 8, Source: test_stats.py
Example 4: test_summary_2d_variable_model
def test_summary_2d_variable_model(self):
    mu = -2.1
    tau = 1.3
    with Model() as model:
        Normal('x', mu, tau, shape=(2, 2),
               testval=floatX_array(np.tile(.1, (2, 2))))
        step = Metropolis(model.vars, np.diag([1.]), blocked=True)
        trace = pm.sample(100, step=step)
    pm.summary(trace)
Developer: springcoil, Project: pymc3, Lines: 9, Source: test_stats.py
Example 5: __init__
def __init__(self, X_train, y_train, n_hidden, lam=1):
    n_train = y_train.shape[0]
    n_dim = X_train.shape[1]
    print(X_train.shape)

    with pm.Model() as rbfnn:
        C = pm.Normal('C', mu=0, sd=10, shape=(n_hidden))
        # beta = pm.Gamma('beta', 1, 1)
        w = pm.Normal('w', mu=0, sd=10, shape=(n_hidden + 1))
        # component, updates = theano.scan(fn=lambda x: T.sum(C-x)**2, sequences=[X_train])
        y_out = []
        for x in X_train:
            # rbf_out = T.exp(-lam*T.sum((C-x)**2, axis=1))
            # 1d speed up
            rbf_out = T.exp(-lam * (C - x)**2)
            # rbf_out = theano.printing.Print(rbf_out)
            rbf_out_biased = \
                T.concatenate([rbf_out, T.alloc(1, 1)], 0)
            y_out.append(T.dot(w, rbf_out_biased))
        y = pm.Normal('y', mu=y_out, sd=0.01, observed=y_train)

        start = pm.find_MAP(fmin=scipy.optimize.fmin_l_bfgs_b)
        print(start)
        step = pm.NUTS(scaling=start)
        trace = pm.sample(2000, step, progressbar=False)
        step = pm.NUTS(scaling=trace[-1])
        trace = pm.sample(20000, step, start=trace[-1])

        print(summary(trace, vars=['C', 'w']))

        vars = trace.varnames
        for i, v in enumerate(vars):
            for d in trace.get_values(v, combine=False, squeeze=False):
                d = np.squeeze(d)
                with open(str(v) + ".txt", "w+") as thefile:
                    for item in d:
                        print(item, file=thefile)

        traceplot(trace)
        plt.show()
Developer: jshe857, Project: thesis-rbfnn, Lines: 42, Source: MC_net.py
Example 6: run
def run(n=1500):
    if n == 'short':
        n = 50
    with m:
        trace = pm.sample(n)

    pm.traceplot(trace, varnames=['mu_hat'])

    print('Example observed data: ')
    print(y[:30, :].T)
    print('The true ranking is: ')
    print(yreal.flatten())
    print('The Latent mean is: ')
    latentmu = np.hstack(([0], pm.summary(trace, varnames=['mu_hat'])['mean'].values))
    print(np.round(latentmu, 2))
    print('The estimated ranking is: ')
    print(np.argsort(latentmu))
Developer: aloctavodia, Project: pymc3, Lines: 18, Source: rankdata_ordered.py
Example 7: print
import pymc3 as pm
import seaborn as sn
import matplotlib.pyplot as plt

with pm.Model() as model:
    uniform = pm.Uniform('uniform', lower=0, upper=1)
    normal = pm.Normal('normal', mu=0, sd=1)
    beta = pm.Beta('beta', alpha=0.5, beta=0.5)
    exponential = pm.Exponential('exponential', 1.0)
    trace = pm.sample(2000)

print(pm.summary(trace).round(2))
pm.traceplot(trace)
plt.show()
Developer: yaochitc, Project: learning_libraries, Lines: 16, Source: continuous.py
Example 8: get_garch_model
def get_garch_model():
    r = np.array([28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float64)
    sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float64)
    alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18], dtype=np.float64)
    shape = r.shape
    with Model() as garch:
        alpha1 = Uniform('alpha1', 0., 1., shape=shape)
        beta1 = Uniform('beta1', 0., 1 - alpha1, shape=shape)
        mu = Normal('mu', mu=0., sd=100., shape=shape)
        theta = tt.sqrt(alpha0 + alpha1 * tt.pow(r - mu, 2) +
                        beta1 * tt.pow(sigma1, 2))
        Normal('obs', mu, sd=theta, observed=r)
    return garch


def run(n=1000):
    if n == "short":
        n = 50
    with get_garch_model():
        tr = sample(n, tune=1000)
    return tr


if __name__ == '__main__':
    summary(run())
Developer: alexander-belikov, Project: pymc3, Lines: 30, Source: garch_example.py
Example 9: mixed_effects
# ... part of the code is omitted here ...
)

# Dependent Variable
BoundedNegativeBinomial = pm.Bound(pm.NegativeBinomial, lower=1)
y_est = BoundedNegativeBinomial('y_est', mu=mu, alpha=alpha, observed=y)
y_pred = BoundedNegativeBinomial('y_pred', mu=mu, alpha=alpha, shape=y.shape)
# y_est = pm.NegativeBinomial('y_est', mu=mu, alpha=alpha, observed=y)
# y_pred = pm.NegativeBinomial('y_pred', mu=mu, alpha=alpha, shape=y.shape)
# y_est = pm.Poisson('y_est', mu=mu, observed=data)
# y_pred = pm.Poisson('y_pred', mu=mu, shape=data.shape)

start = pm.find_MAP()
step = pm.Metropolis(start=start)
# step = pm.NUTS()
# backend = pm.backends.Text('test')
# trace = pm.sample(NSamples, step, start=start, chain=1, njobs=2, progressbar=True, trace=backend)
trace = pm.sample(NSamples, step, start=start, njobs=1, progressbar=True)
trace2 = trace
trace = trace[-burn::thin]
# waic = pm.waic(trace)
# dic = pm.dic(trace)
# with pm.Model() as model:
#     trace_loaded = pm.backends.sqlite.load('FF49_industry.sqlite')
# y_pred.dump('FF49_industry_missing/y_pred')

## POSTERIOR PREDICTIVE CHECKS
y_pred = trace.get_values('y_pred')
pm.summary(trace, vars=covariates)

# PARAMETER POSTERIORS
anno_kwargs = {'xycoords': 'data', 'textcoords': 'offset points',
               'rotation': 90, 'va': 'bottom', 'fontsize': 'large'}
anno_kwargs2 = {'xycoords': 'data', 'textcoords': 'offset points',
                'rotation': 0, 'va': 'bottom', 'fontsize': 'large'}
n0, n1, n2, n3 = 1, 5, 9, 14  # numbering for posterior plots

# intercepts
# mn = pm.df_summary(trace)['mean']['Intercept_log__0']
# ax[0,0].annotate('{:.3f}'.format(mn), xy=(mn, 0), xytext=(0, 15), color=blue, **anno_kwargs2)
# mn = pm.df_summary(trace)['mean']['Intercept_log__1']
# ax[0,0].annotate('{:.3f}'.format(mn), xy=(mn, 0), xytext=(0, 15), color=purple, **anno_kwargs2)
# coeffs
# mn = pm.df_summary(trace)['mean'][2]
# ax[1,0].annotate('{:.3f}'.format(mn), xy=(mn, 0), xytext=(5, 10), color=red, **anno_kwargs)
# mn = pm.df_summary(trace)['mean'][3]
# ax[2,0].annotate('{:.3f}'.format(mn), xy=(mn, 0), xytext=(5, 10), color=red, **anno_kwargs)
# mn = pm.df_summary(trace)['mean'][4]
# ax[3,0].annotate('{:.3f}'.format(mn), xy=(mn, 0), xytext=(5, 10), color=red, **anno_kwargs)
# plt.savefig('figure1_mixed.png')

ax = pm.traceplot(trace, vars=['Intercept'] + trace.varnames[n0:n1],
                  lines={k: v['mean'] for k, v in pm.df_summary(trace).iterrows()})
for i, mn in enumerate(pm.df_summary(trace)['mean'][n0:n1]):  # +1 because up and down intercept
    ax[i, 0].annotate('{:.3f}'.format(mn), xy=(mn, 0), xytext=(5, 10), color=red, **anno_kwargs)
plt.savefig('figure1_mixed.png')
Developer: peitalin, Project: CoarseClocks, Lines: 66, Source: bayesempirics.py
Example 10: posterior_summary
def posterior_summary(self, **kwargs):
    return pm.summary(self.posterior_, **kwargs)
Developer: eric-czech, Project: portfolio, Lines: 2, Source: models.py
Example 11: print
with mdl_ols:
    ## find MAP using Powell, seems to be more robust
    t1 = time.time()
    start_MAP = pm.find_MAP(fmin=optimize.fmin_powell)
    t2 = time.time()
    print("Found MAP, took %f seconds" % (t2 - t1))

    ## take samples
    t1 = time.time()
    traces_ols = pm.sample(2000, start=start_MAP, step=pm.NUTS(), progressbar=True)
    print()
    t2 = time.time()
    print("Done sampling, took %f seconds" % (t2 - t1))

pm.summary(traces_ols)

## plot the samples and the marginal distributions
_ = pm.traceplot(
    traces_ols,
    figsize=(12, len(traces_ols.varnames) * 1.5),
    lines={k: v["mean"] for k, v in pm.df_summary(traces_ols).iterrows()},
)
plt.show()

do_tstudent = False
if do_tstudent:
    print("Robust Student-t analysis...")
Developer: iastro-pt, Project: Bayes-IA, Lines: 30, Source: linear_regression_outliers.py
Example 12: print
print(map_estimate)

from pymc3 import NUTS, sample
from pymc3 import traceplot

with basic_model:
    # obtain starting values via MAP
    start = find_MAP(fmin=optimize.fmin_powell)
    # instantiate sampler
    step = NUTS(scaling=start)
    # draw 2000 posterior samples
    trace = sample(2000, step, start=start)

trace['alpha'][-5:]
traceplot(trace)
plt.show()

from pymc3 import summary
summary(trace)

n = 500
p = 0.3
with Model():
    x = Normal('alpha', mu=0, sd=10)
    print(type(x))
Developer: shidanxu, Project: Meng-Finale, Lines: 31, Source: mengpymc.py
Example 13: print
else:
    fit_results = np.array([out.values['decay'] * delta_t,
                            np.sqrt(out.covar[0, 0]) * delta_t,
                            out.values['amplitude'],
                            np.sqrt(out.covar[1, 1])])
    print(out.fit_report(min_correl=0.25))

trace = sm.run(x=data,
               aB=alpha_B,
               bB=beta_B,
               aA=alpha_A,
               bA=beta_A,
               delta_t=delta_t,
               N=N)
pm.summary(trace)

traceB_results = np.percentile(trace['B'], (2.5, 25, 50, 75, 97.5))
traceB_results = np.concatenate((traceB_results, [np.std(trace['B'])], [np.mean(trace['B'])]))
traceA_results = np.percentile(trace['A'], (2.5, 25, 50, 75, 97.5))
traceA_results = np.concatenate((traceA_results, [np.std(trace['A'])], [np.mean(trace['A'])]))

results = np.concatenate((data_results, fit_results, traceB_results, traceA_results))
print(results)

if result_array is None:
    result_array = results
else:
    result_array = np.vstack((result_array, results))
Developer: hstrey, Project: Bayesian-Analysis, Lines: 31, Source: bayesian_mapping_BA.py
Example 14: run
def run(n=5000):
    with model_1:
        trace = pm.sample(n)
    pm.summary(trace)
Developer: aloctavodia, Project: pymc3, Lines: 5, Source: lightspeed_example.py
Example 15: two_gaussians
    log_like2 = - 0.5 * n * tt.log(2 * np.pi) \
                - 0.5 * tt.log(dsigma) \
                - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
    return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))

with pm.Model() as ATMIP_test:
    X = pm.Uniform('X',
                   shape=n,
                   lower=-2. * np.ones_like(mu1),
                   upper=2. * np.ones_like(mu1),
                   testval=-1. * np.ones_like(mu1),
                   transform=None)
    like = pm.Deterministic('like', two_gaussians(X))
    llk = pm.Potential('like', like)

with ATMIP_test:
    step = atmcmc.ATMCMC(n_chains=n_chains, tune_interval=tune_interval,
                         likelihood_name=ATMIP_test.deterministics[0].name)
    trcs = atmcmc.ATMIP_sample(
        n_steps=n_steps,
        step=step,
        njobs=njobs,
        progressbar=True,
        trace=test_folder,
        model=ATMIP_test)

pm.summary(trcs)
Pltr = pm.traceplot(trcs, combined=True)
plt.show(Pltr[0][0])
Developer: 21hub, Project: pymc3, Lines: 30, Source: ATMIP_2gaussians.py
Example 16: real_func
        trace = mc.sample(nsamples, step=step, start=start, njobs=self.njobs, trace=backend)
        return trace


if __name__ == "__main__":
    def real_func():
        x = np.linspace(0.01, 1.0, 10)
        f = x + np.random.randn(len(x)) * 0.01
        return f

    def model_func(beta):
        x = np.linspace(0.01, 1.0, 10)
        f = beta
        return f

    data = real_func()
    tau_obs = np.eye(10) / .01**2
    tau_prior = np.eye(10) / 1.0**2
    beta_prior = np.ones_like(data) * 1.0
    beta_map = np.linspace(0.01, 1.0, 10) + np.random.randn(10) * 0.1

    sampler = MCMCSampler(model_func, data, tau_obs, beta_prior, tau_prior, beta_map, is_cov=False, method=None)
    trace = sampler.sample(2000)
    mc.summary(trace)
    mc.traceplot(trace)

    plt.figure()
    plt.plot(beta_map, label='ACTUAL')
    plt.plot(np.mean(trace['beta'][:, :], axis=0), label='MCMC')
    plt.show()
Developer: anandpratap, Project: inverse_toy_problems, Lines: 30, Source: mcmc.py
Example 17: runModel
# ... part of the code is omitted here ...
    obstype : observed type, SN Ia=0, SNII=1  Marginalized over
    Luminosity :
    """
    if observation['spectype'][i] == -1:
        logluminosity = LogLuminosityMarginalizedOverType(
            'logluminosity' + str(i),
            mus=[logL_snIa, logL_snII],
            sds=[numpy.log(10) / 2.5 * sigma_snIa, numpy.log(10) / 2.5 * sigma_snII],
            p=prob, testval=1.)
    else:
        if observation['spectype'][i] == 0:
            usemu = logL_snIa
            usesd = numpy.log(10) / 2.5 * sigma_snIa
            usep = prob
        else:
            usemu = logL_snII
            usesd = numpy.log(10) / 2.5 * sigma_snII
            usep = 1 - prob
        logluminosity = LogLuminosityGivenSpectype(
            'logluminosity' + str(i), mu=usemu, sd=usesd, p=usep)

    luminosity = T.exp(logluminosity)

    """
    Redshift Node.
    Not considered explicitly in our model.
    """

    """
    Observed Redshift, Counts Node.

    pdf(observed redshift, Counts | Luminosity, Redshift, Cosmology, Calibration)
        = pdf(observed redshift | Redshift)
          * pdf(Counts | Luminosity, Redshift, Cosmology, Calibration)

    The pdf of the observed redshift is assumed to be a sum of delta functions: the
    perfectly measured redshift of the supernova, or the redshifts of potential
    galaxy hosts.

    pdf(observed redshift | Redshift) = sum_i p_i delta(observed redshift_i - Redshift)

    where p_i is the probability of observed redshift_i being the correct redshift, so

    pdf(observed redshift, Counts | Luminosity, Redshift, Cosmology, Calibration)
        = sum_i p_i pdf(Counts | Luminosity, Redshift=observed redshift_i, Cosmology, Calibration)

    The class CountsWithThreshold handles this pdf.

    Dependencies
    ------------
    luminosity : luminosity
    redshift : host redshift
    cosmology : cosmology
    Calibration : calibration

    Parameters
    ----------
    observed_redshift  Marginalized over
    counts
    """
    lds = []
    fluxes = []
    for z_ in observation['specz'][i]:
        # ld = 0.5/h0*(z_+T.sqr(z_))* \
        #      (1 + 1/T.sqrt((1+z_)**3 * (Om0 + (1-Om0)*(1+z_)**(3*w0))))
        ld = luminosity_distance(z_, Om0, w0)
        lds.append(ld)
        fluxes.append(luminosity / 4 / numpy.pi / ld**2)

    counts = Counts('counts' + str(i), fluxes=fluxes,
                    pzs=observation['zprob'][i], Z=zeropoints,
                    observed=observation['counts'][i])

    if observation['spectype'][i] == -1:
        pass
    else:
        normalization = SampleRenormalization(
            'normalization' + str(i), threshold=1e-9,
            logL_snIa=logL_snIa, sigma_snIa=sigma_snIa,
            logL_snII=logL_snII, sigma_snII=sigma_snII,
            luminosity_distances=lds, Z=zeropoints,
            pzs=observation['zprob'][i], prob=prob, observed=1)

from pymc3 import find_MAP, NUTS, sample, summary
from scipy import optimize

with basic_model:
    backend = SQLite('trace.sqlite')
    # obtain starting values via MAP
    start = find_MAP(fmin=optimize.fmin_bfgs, disp=True)
    # draw 500 posterior samples
    trace = sample(500, start=start, trace=backend)
summary(trace)
Developer: AlexGKim, Project: abc, Lines: 101, Source: Nodes.py
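For reference, the marginalization spelled out in the docstring of the example above can be written compactly (same content, just typeset; here \hat{z} denotes the observed redshift, z the true redshift, C the counts, L the luminosity, and \Theta the cosmology and calibration):

$$
p(\hat{z}, C \mid L, z, \Theta) = p(\hat{z} \mid z)\, p(C \mid L, z, \Theta),
\qquad
p(\hat{z} \mid z) = \sum_i p_i\, \delta(\hat{z}_i - z),
$$

so that integrating out the true redshift leaves

$$
p(\hat{z}, C \mid L, \Theta) = \sum_i p_i\, p(C \mid L, z = \hat{z}_i, \Theta).
$$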
Example 18: get_garch_model
def get_garch_model():
    r = np.array([28, 8, -3, 7, -1, 1, 18, 12])
    sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18])
    alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18])
    shape = r.shape
    with Model() as garch:
        alpha1 = Normal('alpha1', mu=np.zeros(shape=shape), sd=np.ones(shape=shape), shape=shape)
        BoundedNormal = Bound(Normal, upper=(1 - alpha1))
        beta1 = BoundedNormal('beta1',
                              mu=np.zeros(shape=shape),
                              sd=1e6 * np.ones(shape=shape),
                              shape=shape)
        mu = Normal('mu', mu=np.zeros(shape=shape), sd=1e6 * np.ones(shape=shape), shape=shape)
        theta = tt.sqrt(alpha0 + alpha1 * tt.pow(r - mu, 2) +
                        beta1 * tt.pow(sigma1, 2))
        Normal('obs', mu, sd=theta, observed=r)
    return garch


def run(n=1000):
    if n == "short":
        n = 50
    with get_garch_model():
        tr = sample(n, n_init=10000)
    return tr


if __name__ == '__main__':
    print(summary(run()))
Developer: hstm, Project: pymc3, Lines: 30, Source: garch_example.py
Example 19: load_australian_credit
import theano.tensor as T
from load_data import load_australian_credit, load_german_credit, load_heart, load_pima_indian
import pymc3 as pm
import numpy as np
from pymc3 import summary
from pymc3 import traceplot

germanData, germanLabel = load_australian_credit()
# germanData, germanLabel = load_pima_indian()

# normalize so that each dimension has mean 0 and std 1
g_mean = np.mean(germanData, axis=0)
g_std = np.std(germanData, axis=0)
germanData = (germanData - g_mean) / g_std

with pm.Model() as model:
    alpha = pm.Normal("alpha_pymc3", mu=0.0, tau=1e-2)
    beta = pm.Normal("beta_pymc3", mu=0.0, tau=1e-2, shape=14)  # the Australian data has 14 predictors
    y_hat_prob = 1.0 / (1.0 + T.exp(-(T.sum(beta * germanData, axis=1) + alpha)))
    yhat = pm.Bernoulli("yhat", y_hat_prob, observed=germanLabel)
    trace = pm.sample(10000, pm.NUTS())

trace1 = trace[5000:]  # get rid of the burn-in samples
summary(trace1)
traceplot(trace1)

alpha_mean = np.mean(trace1["alpha_pymc3"])
beta_mean = np.mean(trace1["beta_pymc3"], axis=0)
param_mean = (np.sum(alpha_mean) + np.sum(beta_mean)) / 15.0
print("the overall mean of the parameters: ", param_mean)
Developer: hthth0801, Project: HMC_sample, Lines: 30, Source: pyMC3_getSamples.py
Example 20:
Hans_Model = pm.Model()

with Hans_Model:
    # Define priors
    alpha = pm.Normal('alpha_est', mu=0, sd=10)
    beta = pm.Normal('beta_est', mu=0, sd=10, shape=2)
    sigma = pm.HalfNormal('sigma_est', sd=1)
    # Model parameter
    mu = alpha + beta[0] * X1 + beta[1] * X2
    # Likelihood
    Y_rv = pm.Normal('Y_rv', mu=mu, sd=sigma, observed=Y)

''' Model fitting '''
with Hans_Model:
    # step = pm.Metropolis(vars=[alpha, beta, sigma])
    param_MAP = pm.find_MAP(fmin=sp.optimize.fmin_powell)
    Method = pm.Slice(vars=[alpha, beta, sigma])
    trace = pm.sample(Niter, step=Method, start=param_MAP)

pm.traceplot(trace)
print(pm.summary(trace))
plt.show()

# plt.plot(trace['alpha_est'])
# print(pm.summary(trace))
# plt.show()
Developer: HansJung, Project: ML_Implementation, Lines: 31, Source: Practice_pyMC2.py
Note: The pymc3.summary examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms; the snippets are selected from open-source projects contributed by many developers. Copyright of the source code belongs to the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.