This article collects typical usage examples of the pymc.sample function in Python. If you have been wondering what exactly the sample function does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The following section presents 20 code examples of the sample function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
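Before the examples, here is a minimal, self-contained sketch of the calling pattern most of them share: build a model inside a with pm.Model() block, pick a step method, optionally initialize at the MAP estimate, and call pm.sample. It uses the PyMC3-era API that the examples below target (the sd keyword, find_MAP, positional step and start arguments); the model, data, and variable names are illustrative assumptions, not taken from any of the listed projects.

import numpy as np
import pymc as pm  # PyMC3-era API, imported as either pymc or pm in the examples below

# synthetic data: noisy observations around an unknown mean
observed = 1.5 + 0.5 * np.random.randn(200)

with pm.Model() as model:
    mu = pm.Normal('mu', mu=0., sd=10.)                     # prior on the mean
    sigma = pm.Uniform('sigma', lower=0, upper=10)          # prior on the noise scale
    y = pm.Normal('y', mu=mu, sd=sigma, observed=observed)  # likelihood of the data
    start = pm.find_MAP()                                   # initialize the chain at the MAP estimate
    step = pm.Metropolis()                                  # choose a step method
    trace = pm.sample(2000, step, start, progressbar=True)  # draw 2000 posterior samples

The examples that follow vary this pattern: other step methods (NUTS, Slice, HamiltonianMC, or a list of Metropolis steps), multiple chains via njobs, tuning via tune, and convergence checks such as gelman_rubin.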
Example 1: run_DREAM
def run_DREAM(self, nsamples=100000):
    model = pymc.Model()
    with model:
        params = pymc.Normal('params', mu=self.start_parameters,
                             sd=np.array([1.0]*len(self.start_parameters)),
                             shape=(len(self.start_parameters)))
        #params = pymc.Flat('params', shape=(len(self.start_parameters)))
        global cost_function
        cost_function = self.cost_function
        error = pymc.Potential('error', DREAM_cost(params))
        nseedchains = 10*len(self.model.parameters_rules())
        step = pymc.Dream(variables=[params],
                          nseedchains=nseedchains,
                          blocked=True,
                          start_random=False,
                          save_history=True,
                          parallel=True,
                          adapt_crossover=False,
                          verbose=False,)
        trace = pymc.sample(nsamples, step,
                            start=self.pso_results,
                            njobs=self.nchains,
                            use_mpi=False,
                            progressbar=False,)
        # Keep extending the run until the Gelman-Rubin statistic of every
        # parameter falls below 1.2.
        cont_flag = True
        while cont_flag:
            cont_flag = False
            conv_stats = gelman_rubin(trace)
            for i in conv_stats['params']:
                if i > 1.2:
                    print "Parameters have not converged, will continue run."
                    print "Value so far is %s" % i
                    cont_flag = True
                    break
            trace = pymc.sample(int(nsamples*.1), step,
                                #start=self.pso_results,
                                njobs=self.nchains,
                                use_mpi=False,
                                trace=trace,
                                progressbar=False,)
        conv_stats = gelman_rubin(trace)
        for i in conv_stats['params']:
            print i, i < 1.2
        #pymc.traceplot(trace, vars=[params, error])
        #plt.show()
        return trace
Developer: LoLab-VU, Project: NightMare, Lines: 52, Source: nightmare.py
Example 2: bayesian_random_effects
def bayesian_random_effects(data, labels, group, n_samples=2000, n_burnin=500):
    import pymc as pm
    # preparing the data
    donors = data[group].unique()
    donors_lookup = dict(zip(donors, range(len(donors))))
    data['donor_code'] = data[group].replace(donors_lookup).values
    n_donors = len(data[group].unique())
    donor_idx = data['donor_code'].values
    # setting up the model
    with pm.Model() as hierarchical_model:
        # Hyperpriors for group nodes
        group_intercept_mean = pm.Normal('group intercept (mean)', mu=0., sd=100**2)
        group_intercept_variance = pm.Uniform('group intercept (variance)', lower=0, upper=100)
        group_slope_mean = pm.Normal('group slope (mean)', mu=0., sd=100**2)
        group_slope_variance = pm.Uniform('group slope (variance)', lower=0, upper=100)
        individual_intercepts = pm.Normal('individual intercepts', mu=group_intercept_mean,
                                          sd=group_intercept_variance, shape=n_donors)
        individual_slopes = pm.Normal('individual slopes', mu=group_slope_mean,
                                      sd=group_slope_variance, shape=n_donors)
        # Model error
        residuals = pm.Uniform('residuals', lower=0, upper=100)
        expression_est = individual_slopes[donor_idx] * data[labels[0]].values + individual_intercepts[donor_idx]
        # Data likelihood
        expression_like = pm.Normal('expression_like', mu=expression_est, sd=residuals, observed=data[labels[1]])
        start = pm.find_MAP()
        step = pm.NUTS(scaling=start)
        hierarchical_trace = pm.sample(n_samples, step, start=start, progressbar=True)
    mean_slope = hierarchical_trace['group slope (mean)'][n_burnin:].mean()
    zero_percentile = percentileofscore(hierarchical_trace['group slope (mean)'][n_burnin:], 0)
    #print "Mean group level slope was %g (zero was %g percentile of the posterior distribution)"%(mean_slope, zero_percentile)
    #pm.summary(hierarchical_trace[n_burnin:], vars=['group slope (mean)'])
    #pm.traceplot(hierarchical_trace[n_burnin:])
    #selection = donors
    #fig, axis = plt.subplots(2, 3, figsize=(12, 6), sharey=True, sharex=True)
    #axis = axis.ravel()
    #xvals = np.linspace(data[labels[0]].min(), data[labels[0]].max())
    #for i, c in enumerate(selection):
    #    c_data = data.ix[data[group] == c]
    #    c_data = c_data.reset_index(drop=True)
    #    z = list(c_data['donor_code'])[0]
    #    for a_val, b_val in zip(hierarchical_trace['individual intercepts'][n_burnin::10][z], hierarchical_trace['individual slopes'][n_burnin::10][z]):
    #        axis[i].plot(xvals, a_val + b_val * xvals, 'g', alpha=.1)
    #    axis[i].plot(xvals, hierarchical_trace['individual intercepts'][n_burnin:][z].mean() + hierarchical_trace['individual slopes'][n_burnin:][z].mean() * xvals,
    #                 'g', alpha=1, lw=2.)
    #    axis[i].hexbin(c_data[labels[0]], c_data[labels[1]], mincnt=1, cmap=plt.cm.YlOrRd_r)
    #    axis[i].set_title(c)
    #    axis[i].set_xlabel(labels[0])
    #    axis[i].set_ylabel(labels[1])
    #
    #plt.show()
    return mean_slope, zero_percentile
Developer: Conxz, Project: alleninf, Lines: 60, Source: gewa_analysis.py
Example 3: get_traces_hierarchical
def get_traces_hierarchical(x, y, idxs, max_iter=100000):
    """ sample hierarchical model """
    idx_size = len(np.unique(idxs))
    with pm.Model() as hierarchical_model:
        # hyperpriors for group nodes, all uninformative
        alpha_mu = pm.Normal('alpha_mu', mu=0., sd=100**2)
        alpha_sigma = pm.Uniform('alpha_sigma', lower=0, upper=100)
        beta_mu = pm.Normal('beta_mu', mu=0., sd=100**2)
        beta_sigma = pm.Uniform('beta_sigma', lower=0, upper=100)
        # Intercept for each testtype, distributed around the group mean mu_a.
        # Above, mu & sd are fixed values, while below we plug in a common
        # group distribution for all a & b (which are vectors of length idx_size).
        # priors for alpha, beta and model error, uninformative
        alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sigma, shape=idx_size)
        beta = pm.Normal('beta', mu=beta_mu, sd=beta_sigma, shape=idx_size)
        epsilon = pm.Uniform('epsilon', lower=0, upper=100)
        # hierarchical linear model
        y_est = alpha[idxs] + beta[idxs] * x
        likelihood = pm.Normal('likelihood', mu=y_est, sd=epsilon, observed=y)
        traces = pm.sample(max_iter, step=pm.Metropolis(),
                           start=pm.find_MAP(), progressbar=True)
    return traces
Developer: tcarnus, Project: suas_literacy, Lines: 27, Source: 02_InitialExploration.py
Example 4: MCMC
def MCMC(model):
    import time
    with model:
        n = 6000
        START = time.time()
        try:
            start = pm.find_MAP()
        except AssertionError:
            return model, {'error': 'AssertionError in pm.find_MAP()'}
        init_time = time.time() - START
        print 'Time to initialize: %ds' % (init_time)
        START = time.time()
        trace = pm.sample(n, pm.Metropolis(), start)
        duration = time.time() - START
        print 'Time to sample (MH): %ds' % (duration)
        # START = time.time()
        # trace = pm.sample(n, pm.Slice(), start)
        # print 'Time to sample (Slice): %ds' % (time.time()-START)
        # START = time.time()
        # trace = pm.sample(n, pm.HamiltonianMC(), start)
        # print 'Time to sample (HMC): %ds' % (time.time()-START)
        # error_b, error_x, output = error(trace, model.data.A, model.data.x_true,
        #                                  model.data.b_obs, model.data.scaling)
        # fig = pm.traceplot(trace)
        # plot(error_b, error_x)
        # plt.show()
    return model, trace, init_time, duration
Developer: megacell, Project: traffic-estimation-bayesian, Lines: 32, Source: grid_simulation.py
Example 5: poisson_regression
def poisson_regression(targets, predictors, iters=2000):
    """ Return the posterior of a Bayesian Poisson regression model.

    This function takes the targets and predictors and builds a Poisson
    regression model. The predictor coefficients are found by sampling
    from the posterior using PyMC (NUTS in particular).

    The posterior is returned as an MxN array, where M is the number of
    samples and N is the number of predictors. The first column is the
    coefficient for the first predictor and so on.

    Requires PyMC3
    """
    with pm.Model() as poisson_model:
        # priors for coefficients
        coeffs = pm.Uniform('coeffs', -10, 10, shape=(1, predictors.shape[1]))
        p = t.exp(pm.sum(coeffs*predictors.values, 1))
        obs = pm.Poisson('obs', p, observed=targets)
        start = pm.find_MAP()
        step = pm.NUTS(scaling=start)
        poisson_trace = pm.sample(iters, step, start=start, progressbar=False)
    return poisson_trace['coeffs'].squeeze()
Developer: mcleonard, Project: memory, Lines: 27, Source: bayes.py
Example 6: test_summary_1d_variable_model
def test_summary_1d_variable_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, shape=2, testval=[.1, .1])
        step = Metropolis(model.vars, np.diag([1.]))
        trace = pm.sample(100, step=step)
    pm.summary(trace)
Developer: Isilendil, Project: pymc, Lines: 8, Source: test_stats.py
Example 7: test_summary_0d_variable_model
def test_summary_0d_variable_model():
    mu = -2.1
    tau = 1.3
    with Model() as model:
        x = Normal('x', mu, tau, testval=.1)
        step = Metropolis(model.vars, np.diag([1.]), blocked=True)
        trace = pm.sample(100, step=step)
    pm.summary(trace)
Developer: 17patelumang, Project: pymc, Lines: 8, Source: test_stats.py
Example 8: foo
def foo(self, discrete):
    student_ids = []
    timestep_ids = []
    y = []
    ids = collections.defaultdict(itertools.count().next)
    for t in range(0, len(self)):
        student_ids += [ids[o.id] for o in self[t]]
        timestep_ids += [t for o in self[t]]
        y += [o.value for o in self[t]]
    n_students = len(set(student_ids))
    n_timesteps = len(self)
    print student_ids, "!", n_students
    with pm.Model() as hierarchical_model:
        # Hyperpriors for group nodes
        mu_student = pm.Normal('mu_student', mu=0., sd=100**2)
        sigma_student = pm.Uniform('sigma_student', lower=0, upper=100)
        #mu_timestep = pm.Normal('mu_beta', mu=0., sd=100**2)
        #sigma_timestep = pm.Uniform('sigma_beta', lower=0, upper=100)
        student = pm.Normal('student', mu=mu_student, sd=sigma_student, shape=n_students)  # random effect
        timestep = pm.Normal('timestep', mu=0, sd=100**2, shape=n_timesteps)  # fixed effect
        # Model error
        eps = pm.Uniform('eps', lower=0, upper=100)
        theta = student[student_ids] + timestep[timestep_ids]
        # Data likelihood
        if discrete:
            ll = pm.Bernoulli('theta', p=self.invlogit(theta), observed=y)
        else:
            ll = pm.Normal('theta', mu=theta, sd=eps, observed=y)
    with hierarchical_model:
        print "Find MAP..."
        start = pm.find_MAP()
        #if discrete:
        #    step = pm.BinaryMetropolis(scaling=start)
        #else:
        print "NUTS..."
        step = pm.NUTS(scaling=start)
        print "Samples..."
        hierarchical_trace = pm.sample(2000, step, start=start, progressbar=False)
        print "done..."
    print "Plot..."
    pl.figure(figsize=(10, 10))
    f = pm.traceplot(hierarchical_trace[500:])
    f.savefig("a.png")
    return hierarchical_trace
Developer: ml-smores, Project: confident, Lines: 56, Source: estimator.py
Example 9: test_plots_multidimensional
def test_plots_multidimensional():
    # Test single trace
    from .models import multidimensional_model

    start, model, _ = multidimensional_model()
    with model as model:
        h = np.diag(find_hessian(start))
        step = Metropolis(model.vars, h)
        trace = sample(3000, step, start)
        traceplot(trace)
Developer: Jakobularius, Project: pymc, Lines: 12, Source: test_plots.py
Example 10: run_sig
def run_sig():
    signal_responses = binom.rvs(100, 0.69, size=1)
    noise_responses = binom.rvs(100, 0.30, size=1)
    m = sig_detect(signal_responses, noise_responses, 1, 100)
    with m:
        #step = pm.Metropolis(blocked=False)
        step = pm.HamiltonianMC()
        start = pm.find_MAP()
        #start = {'Pr. mean discrim.': 0.0, 'Pr. mean bias': 0.0,
        #         'taud': 0.001, 'tauc': 0.001}
        trace = pm.sample(5000, step, start, tune=500, njobs=2)
    return trace[1000:]
Developer: nwilming, Project: mcmodels, Lines: 12, Source: model.py
Example 11: run_pl
def run_pl():
    x = arange(180)
    x = concatenate((x, x, x, x))
    xx = piecewise_predictor(x, 90, 50, 1, 2)
    y = xx + 20*(np.random.randn(len(x)))
    y = y - y.mean()
    m = piecewise_linear(y, x)
    with m:
        step = pm.Metropolis()
        #step = pm.NUTS()
        trace = pm.sample(2000, step, {}, tune=50, njobs=1, progressbar=True)
    return trace
Developer: nwilming, Project: mcmodels, Lines: 12, Source: model.py
Example 12: test_multichain_plots
def test_multichain_plots():
    from pymc.examples import disaster_model as dm

    with dm.model as model:
        # Run sampler
        step1 = Slice([dm.early_mean, dm.late_mean])
        step2 = Metropolis([dm.switchpoint])
        start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 50}
        ptrace = sample(1000, [step1, step2], start, njobs=2)

    forestplot(ptrace, vars=['early_mean', 'late_mean'])
    autocorrplot(ptrace, vars=['switchpoint'])
Developer: 17patelumang, Project: pymc, Lines: 14, Source: test_plots.py
Example 13: run_fixdur
def run_fixdur():
    import cPickle
    dur, fa, obs = cPickle.load(open('durtest.pickle'))
    m = piecewise_durations(dur, fa, obs-1)
    with m:
        start = pm.find_MAP()  #cPickle.load(open('fixdur_map.pickle'))
        step = pm.Metropolis(vars=[m.named_vars['Mean_offset'],
                                   m.named_vars['Mean_slope1'], m.named_vars['Mean_slope2'],
                                   m.named_vars['Mean_split']], blocked=False)
        step2 = pm.Metropolis(vars=[m.named_vars['Slope1'],
                                    m.named_vars['Slope2'], m.named_vars['Offsets'],
                                    m.named_vars['Breakpoint']])
        trace = pm.sample(5000, [step, step2], start, tune=1000, njobs=1,
                          progressbar=True)
    return trace
Developer: nwilming, Project: mcmodels, Lines: 14, Source: model.py
Example 14: test_plots
def test_plots():
    # Test single trace
    from pymc.examples import arbitrary_stochastic as asmod

    with asmod.model as model:
        start = model.test_point
        h = find_hessian(start)
        step = Metropolis(model.vars, h)
        trace = sample(3000, step, start)

        forestplot(trace)
        autocorrplot(trace)
Developer: DrDark, Project: pymc, Lines: 15, Source: test_plots.py
Example 15: run_banova
def run_banova():
    y = 10 + hstack((np.random.randn(100), np.random.randn(100)+1,
                     np.random.randn(100)+2))
    y = y - y.mean()
    y = y/y.std()
    x = concatenate(([1.0]*100, [0.0]*200))
    X = vstack((x, np.roll(x, 100), np.roll(x, 200)))
    m = oneway_banova(y.astype(float), X.astype(float))
    start = {'offset': 0.0,
             'alphas': array([0, 1, 2.])}
    with m:
        step = pm.Metropolis()
        #step = pm.NUTS()
        trace = pm.sample(150000, step, start, tune=1500, njobs=1, progressbar=True)
    pm.traceplot(trace[::2])
    show()
Developer: nwilming, Project: mcmodels, Lines: 16, Source: model.py
Example 16: sample_model
def sample_model(model, steps, tune=None, njobs=1, observed=['Data']):
    if tune is None:
        tune = steps/2
    with model:
        start = pm.find_MAP()  # cPickle.load(open('fixdur_map.pickle'))
        non_blocked_step = pm.Metropolis(
            vars=[v for k, v in model.named_vars.iteritems()
                  if ('Obs_SD' in k) or ('Mean_' in k) and not (k in set(observed))],
            blocked=False)
        blocked = pm.Metropolis(
            vars=[v for k, v in model.named_vars.iteritems()
                  if not (('Obs_SD' in k) or ('Mean_' in k))
                  and not (k in set(observed))],
            blocked=True)
        trace = pm.sample(
            steps, [non_blocked_step, blocked], start,
            tune=tune, njobs=njobs, progressbar=True)
    return trace
Developer: nwilming, Project: mcmodels, Lines: 18, Source: fixdur.py
Example 17: sample_model_appc
def sample_model_appc(model, steps, tune=None, njobs=4, observed=['Data']):
    if tune is None:
        tune = steps / 2
    with model:
        start = pm.find_MAP()
        non_blocked_step = pm.Metropolis(
            vars=[v for k, v in model.named_vars.iteritems()
                  if ('Obs_SD' in k) or ('Mean_' in k) and not (k in set(observed))
                  and not k.startswith('DNS')],
            blocked=False)
        blocked = pm.Metropolis(
            vars=[v for k, v in model.named_vars.iteritems()
                  if not (('Obs_SD' in k) or ('Mean_' in k))
                  and not (k in set(observed)) and not k.startswith('DNS')],
            blocked=True)
        trace = pm.sample(
            steps, [non_blocked_step, blocked], start,
            tune=tune, njobs=njobs, progressbar=True)
    return trace
Developer: nwilming, Project: mcmodels, Lines: 18, Source: appc.py
Example 18: run_best
def run_best():
    a = np.random.randn(10)
    b = np.random.randn(10) + 2
    print 'A:', a.mean(), a.std()
    print 'B:', b.mean(), b.std()
    x_eval = np.linspace(-10, 10, 100)
    m = best('A', a, 'B', b)
    start = {'A Mean': b.mean(),
             'A Std': a.std(),
             'B Mean': a.mean(),
             'B Std': b.std(),
             'Nu-1': 100}
    with m:
        step = pm.Metropolis(blocked=False)
        trace = pm.sample(10000, step, start, tune=1000, njobs=3, progressbar=False)
    pm.traceplot(trace)
    show()
    return m, trace
Developer: nwilming, Project: mcmodels, Lines: 19, Source: model.py
Example 19: test_sample
def test_sample():
    model, start, step, _ = simple_init()
    test_samplers = [sample]
    tr = sample(5, step, start, model=model)
    test_traces = [None, tr]
    if test_parallel:
        test_samplers.append(psample)
    with model:
        for trace in test_traces:
            for samplr in test_samplers:
                for n in [0, 1, 10, 300]:
                    yield samplr, n, step, {}
                    yield samplr, n, step, {}, trace
                    yield samplr, n, step, start
Developer: Jfeng3, Project: pymc, Lines: 20, Source: test_sampling.py
Example 20: get_traces_individual
def get_traces_individual(x, y, max_iter=10000, quad=False):
    """ sample individual model """
    with pm.Model() as individual_model:
        # priors for alpha, beta and model error, uninformative
        alpha = pm.Normal('alpha', mu=0, sd=100**2)
        beta = pm.Normal('beta', mu=0, sd=100**2)
        epsilon = pm.Uniform('epsilon', lower=0, upper=100)
        # configure model
        y_est = alpha + beta * x
        if quad:
            gamma = pm.Normal('gamma', mu=0, sd=100**2)
            y_est = alpha + beta * x + gamma * x ** 2
        # calc likelihood and do sampling
        likelihood = pm.Normal('likelihood', mu=y_est, sd=epsilon, observed=y)
        traces = pm.sample(max_iter, step=pm.NUTS(), start=pm.find_MAP(), progressbar=True)
    return traces
Developer: tcarnus, Project: suas_literacy, Lines: 22, Source: 02_InitialExploration.py
Note: The pymc.sample examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.