This article collects typical usage examples of the Python pymc.invlogit function. If you have been wondering what exactly invlogit does and how to use it, the curated code samples below may help.
Twenty code examples of invlogit are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
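Before the examples, a quick refresher: invlogit is the inverse of the logit link, invlogit(x) = 1 / (1 + exp(-x)), mapping any real number to a probability in (0, 1). A minimal sketch, assuming PyMC 2.x (the classic pymc namespace that all of the examples below use):

import numpy as np
import pymc as mc  # PyMC 2.x

x = np.array([-2., 0., 2.])
p = mc.invlogit(x)                  # elementwise 1 / (1 + exp(-x)); values in (0, 1)
assert np.allclose(mc.logit(p), x)  # logit is the exact inverse transform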
Example 1: logit_normal_draw
def logit_normal_draw(cf_mean, std, N, J):
    std = pl.array(std)
    if mc.__version__ == '2.0rc2':  # version on Omak
        X = [mc.invlogit(mc.rnormal(mu=cf_mean, tau=std**-2)) for n in range(N)]
        Y = pl.array(X)
    else:
        X = mc.rnormal(mu=cf_mean, tau=std**-2, size=(N, J))
        Y = mc.invlogit(X)
    return Y
Author: aflaxman | Project: pymc-cod-correct | Lines: 9 | Source: data.py
Example 2: generate_synthetic_data
def generate_synthetic_data(truth, key, d):
    """ create simulated data"""
    a0 = d['age_start']
    a1 = d['age_end']
    age_weights = d['age_weights']

    d.update(condition='type_2_diabetes',
             year_start=y,
             year_end=y)

    p0 = dismod3.utils.rate_for_range(truth[key], range(a0, a1 + 1), np.ones(a1 + 1 - a0)/(a1+1-a0))
    p0 = dismod3.utils.trim(p0, 1.e-6, 1. - 1.e-6)

    # TODO: make beta dispersion study level (instead of datum level)
    # p1 = mc.rbeta(p0 * dispersion, (1 - p0) * dispersion)
    p1 = p0

    # TODO: add additional covariates
    if key.find('prevalence') != -1:
        if random.random() < .1:
            d['self-reported'] = True
            p1 = mc.invlogit(mc.logit(p1) - .2)
        else:
            d['self-reported'] = False

    #p2 = mc.rbinomial(n, p1) / n
    p2 = float(p1)

    d['value'] = p2
    d['standard_error'] = .0001

    return d
Author: flaxter | Project: gbd | Lines: 32 | Source: generate_covariate_data.py
Example 3: simdata_postproc
def simdata_postproc(sp_sub, survey_plan):
    """
    This function should take a value for the Gaussian random field in the submodel
    sp_sub, evaluated at the survey plan locations, and return a simulated dataset.
    """
    p = pm.invlogit(sp_sub)
    n = survey_plan.n
    return pm.rbinomial(n, p)
Author: apatil | Project: survey-eval-example | Lines: 8 | Source: __init__.py
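Example 3 is the pattern most of the examples on this page share: push a latent Gaussian value through invlogit to get a probability, then draw counts from it. A standalone sketch with made-up inputs (the field values and sample sizes below are illustrative, not from the source project):

import numpy as np
import pymc as pm

f = np.random.normal(loc=0., scale=1., size=5)  # stand-in for the random field at 5 survey sites
n = np.array([50, 80, 60, 100, 40])             # number examined per site
p = pm.invlogit(f)                              # per-site probability of a positive
positives = pm.rbinomial(n, p)                  # simulated survey counts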
Example 4: p_wells
def p_wells(base_fx=base_fx,
            batch_fx=batch_fx,
            plate_fx=plate_fx,
            batchrow_fx=batchrow_fx,
            batchcol_fx=batchcol_fx,
            treatment_fx=treatment_fx):
    # use this ordering to make everything turn into an ArrayContainer
    return invlogit(treatment_fx[treatment_idxs] +
                    base_fx +
                    batch_fx[batch_idxs] +
                    plate_fx[plate_idxs] +
                    batchrow_fx[batchrow_idxs] +
                    batchcol_fx[batchcol_idxs])
Author: thouis | Project: works-in-progress | Lines: 13 | Source: nephrine_frac.py
Example 5: PR_samps
def PR_samps(mesh, Ms, Cs, Vs, ind, facs):
    """
    Converts a mean function, covariance function, nugget and array of correction factors
    to a sample for the average of parasite rate over a given spatiotemporal mesh.
    """
    nm = mesh.shape[0]
    samps = np.empty((len(ind), nm))
    for i in ind:
        C = Cs[i](mesh, mesh)
        C[::nm+1] += Vs[i]  # add the nugget variance to the diagonal of the covariance matrix
        samps[i,:] = pm.invlogit(pm.mv_normal_cov(Ms[i](mesh), C).ravel()) * facs[A[i]]
    return np.mean(samps, axis=1)
Author: apatil | Project: mbg-world | Lines: 13 | Source: frontend_interface.py
Example 6: known_age_corr_likelihoods_f
def known_age_corr_likelihoods_f(pos, A, fac_array, f_mesh, nug, type=None):
    """
    Computes spline representations over P_mesh for the likelihood
    of N_pos | N_exam, A
    """
    # TODO: Optimize large-N case using CLT of some kind.

    # Allocate work and output arrays.
    N_recs = len(A)
    likelihoods = empty((N_recs, len(f_mesh)))
    likes_now = empty((fac_array.shape[1], len(f_mesh)), dtype=float128)
    splreps = []
    p1 = invlogit(f_mesh)

    # For each record
    for i in xrange(N_recs):
        posi = pos[i]
        Ai = A[i]
        spi = np.sum(posi)
        negi = 1. - posi

        if type is None:
            if len(Ai) < 100:
                fn = outer_small
            else:
                fn = outer_large
        elif type == 's':
            fn = outer_small
        else:
            fn = outer_large

        likelihoods[i,:] = fn(p1, fac_array, Ai, spi, posi, negi, likes_now)

        # Clean out occasional infinities on the edges.
        good_indices = where(1 - isinf(likelihoods[i,:]))[0]

        # Compute spline representations.
        this_splrep = interp.splrep(x=f_mesh[good_indices], y=likelihoods[i,good_indices].squeeze())
        def this_fun(x, sp=this_splrep, Pml=f_mesh[good_indices].min(), Pmh=f_mesh[good_indices].max()):
            out = np.atleast_1d(interp.splev(x, sp))
            if np.any(x < Pml) or np.any(x > Pmh):
                out[np.where(x < Pml)] = -np.Inf
                out[np.where(x > Pmh)] = -np.Inf
            return out.reshape(np.shape(x))
        splreps.append(this_fun)

    return splreps
Author: apatil | Project: mbg-world | Lines: 50 | Source: correction_factors.py
Example 7: mortality
def mortality(self, key="all-cause_mortality", data=None):
    """ Calculate the all-cause mortality rate for the
    region and sex of disease_model, and return it
    in an array corresponding to age_mesh

    Parameters
    ----------
    key : str, optional
      of the form 'all-cause_mortality+gbd_region+year+sex'
    data : list, optional
      the data list to extract all-cause mortality from
    """
    if self.params.get("initial_value", {}).has_key(key):
        return self.get_initial_value(key)

    if not data:
        data = self.filter_data("all-cause_mortality data")

    if len(data) == 0:
        return NEARLY_ZERO * np.ones(len(self.get_estimate_age_mesh()))
    else:
        M, C = uninformative_prior_gp(c=-1.0, scale=300.0)
        age = []
        val = []
        V = []
        for d in data:
            scale = self.extract_units(d)
            a0 = d.get("age_start", MISSING)
            a1 = d.get("age_end", MISSING)
            y = self.value_per_1(d)
            se = self.se_per_1(d)

            if se == MISSING:
                se = 0.01
            if MISSING in [a0, a1, y]:
                continue

            age.append(0.5 * (a0 + a1))
            val.append(y + 0.00001)
            V.append(se ** 2.0)

        if len(data) > 0:
            gp.observe(M, C, age, mc.logit(val), V)

        normal_approx_vals = mc.invlogit(M(self.get_estimate_age_mesh()))
        self.set_initial_value(key, normal_approx_vals)
        return self.get_initial_value(key)
Author: flaxter | Project: gbd | Lines: 47 | Source: disease_json.py
Example 8: sim_data
def sim_data(N, true_cf=[[.3, .6, .1],
                         [.3, .5, .2]],
             true_std=[[.2, .05, .05],
                       [.3, 0.1, 0.1]],
             sum_to_one=True):
    """
    Create an NxTxJ matrix of simulated data (T is determined by the length
    of true_cf, J by the length of the elements of true_cf).

    true_cf - a list of lists of true cause fractions (each must sum to one)
    true_std - a list of lists of the standard deviations corresponding to the true csmf's
               for each time point. Can either be a list of length J inside a list of length
               1 (in this case, the same standard deviation is used for all time points) or
               can be T lists of length J (in this case, a separate standard deviation
               is specified and used for each time point).
    """
    if sum_to_one == True:
        assert pl.allclose(pl.sum(true_cf, 1), 1), 'The sum of elements of true_cf must equal 1'

    T = len(true_cf)
    J = len(true_cf[0])

    ## if only one std provided, duplicate for all time points
    if len(true_std) == 1 and len(true_cf) > 1:
        true_std = [true_std[0] for i in range(len(true_cf))]

    ## transform the mean and std to logit space
    transformed_std = []
    for t in range(T):
        pi_i = pl.array(true_cf[t])
        sigma_pi_i = pl.array(true_std[t])
        transformed_std.append(((1/(pi_i*(pi_i-1)))**2 * sigma_pi_i**2)**0.5)

    ## find minimum standard deviation (by cause across time) and draw from this
    min = pl.array(transformed_std).min(0)
    common_perturbation = [pl.ones([T,J])*mc.rnormal(mu=0, tau=min**-2) for n in range(N)]

    ## draw from remaining variation
    tau = pl.array(transformed_std)**2 - min**2
    tau[tau==0] = 0.000001
    additional_perturbation = [[mc.rnormal(mu=0, tau=tau[t]**-1) for t in range(T)] for n in range(N)]

    result = pl.zeros([N, T, J])
    for n in range(N):
        result[n, :, :] = [mc.invlogit(mc.logit(true_cf[t]) + common_perturbation[n][t] + additional_perturbation[n][t]) for t in range(T)]

    return result
Author: aflaxman | Project: pymc-cod-correct | Lines: 47 | Source: data.py
Example 9: normal_approx
def normal_approx(asrf):
    """
    This 'normal approximation' of the age-specific rate function is
    formed by using each rate to produce an estimate of the
    age-specific rate, and then saying that the logit of the true
    rate function is a gaussian process and these age-specific rates
    are observations of this gaussian process.

    This is less valid and less accurate than using mcmc or map on the
    vars produced by the model_rate_list method below, but maybe it
    will be faster.
    """
    M, C = uninformative_prior_gp()

    # use prior to set rate near zero as requested
    for prior_str in asrf.fit.get('priors', '').split('\n'):
        prior = prior_str.split()
        if len(prior) > 0 and prior[0] == 'zero':
            age_start = int(prior[1])
            age_end = int(prior[2])
            gp.observe(M, C, range(age_start, age_end+1), [-10.], [0.])

    for r in asrf.rates.all():
        mesh, obs, V = logit_rate_from_range(r)

        # make sure that there is something to observe
        if mesh == []:
            continue

        # uncomment the following line to make more inferences than
        # are valid from the data
        #gp.observe(M, C, mesh, obs, V)

        # uncomment the following 2 lines to make fewer inferences than
        # possible: it may be better to waste information than have
        # false confidence
        ii = len(mesh)/2
        gp.observe(M, C, [mesh[ii]], [obs[ii]], [V[ii]])

    x = asrf.fit['out_age_mesh']
    na_rate = mc.invlogit(M(x))
    asrf.fit['normal_approx'] = list(na_rate)
    asrf.save()

    return M, C
Author: flaxter | Project: gbd | Lines: 46 | Source: probabilistic_utils.py
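The idea in examples 7 and 9 — observe noisy rates on the logit scale, smooth them with a Gaussian process, and map the posterior mean back through invlogit — can be reproduced in isolation. A minimal sketch, assuming PyMC 2's gp subpackage; the ages, rates, and covariance parameters below are entirely made up for illustration:

import numpy as np
import pymc as mc
from pymc import gp
from pymc.gp.cov_funs import matern

M = gp.Mean(lambda x: -5. * np.ones(len(x)))  # prior mean: rates near invlogit(-5) ~ .007
C = gp.Covariance(matern.euclidean, diff_degree=2., amp=2., scale=50.)

ages = np.array([5., 25., 50.])
rates = np.array([.01, .05, .10])             # hypothetical observed rates
gp.observe(M, C, obs_mesh=ages, obs_vals=mc.logit(rates), obs_V=.1 * np.ones(3))

smoothed = mc.invlogit(M(np.arange(101.)))    # posterior-mean rate on a fine age mesh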
Example 10: mu_age_p
def mu_age_p(logit_C0=logit_C0, i=rate["i"]["mu_age"], r=rate["r"]["mu_age"], f=rate["f"]["mu_age"]):
    # for acute conditions, it is silly to use ODE solver to
    # derive prevalence, and it can be approximated with a simple
    # transformation of incidence
    if r.min() > 5.99:
        return i / (r + m_all + f)

    C0 = mc.invlogit(logit_C0)
    x = pl.hstack((i, r, f, 1 - C0, C0))
    y = fun.forward(0, x)

    susceptible = y[:N]
    condition = y[N:]

    p = condition / (susceptible + condition)
    p[pl.isnan(p)] = 0.0
    return p
Author: peterhm | Project: gbd | Lines: 19 | Source: ism.py
Example 11: reduce_realizations
def reduce_realizations(filename, reduce_fns, slices, a_lo, a_hi, n_per):
    """
    Generates n_per * len(filename.root.realizations) realizations,
    on the space-time slice defined by slice (a tuple of three slices)
    and reduces them according to the function reduce. Reduce_fns should
    be a list of Python functions of the form

        reduce(this_PR_chunk, product_sofar=None)

    and incorporate this_realization into product_sofar in the desired
    way. It should be robust to the product_sofar=None case, of course.

    a_lo and a_hi are the limits of the age range.
    """
    slices = tuple(slices)
    hf = tb.openFile(filename)
    hr = hf.root
    n_realizations = len(hr.realizations)
    products = dict(zip(reduce_fns, [None]*len(reduce_fns)))
    N_facs = int(1e5)

    # Get nugget variance and age-correction factors
    V = hr.PyMCsamples.col('V')[:]
    facs = mbgw.correction_factors.age_corr_factors_from_limits(a_lo, a_hi, N_facs)

    for i in xrange(n_realizations):
        # Pull out parasite rate chunk
        tot_slice = (slice(i,i+1,1),) + slices
        f_chunk = hr.realizations[tot_slice].squeeze()

        for j in xrange(n_per):
            chunk = f_chunk + np.random.normal(loc=0, scale=np.sqrt(V[i]), size=f_chunk.shape)
            chunk = pm.invlogit(chunk)
            chunk *= facs[np.random.randint(N_facs, size=np.prod(chunk.shape))]
            chunk = chunk.reshape(f_chunk.shape)

            for f in reduce_fns:
                product_sofar = products[f]
                products[f] = f(chunk, product_sofar)

    return products
Author: malaria-atlas-project | Project: JS2011 | Lines: 40 | Source: supervisor.py
Example 12: ages_and_data
def ages_and_data(N_exam, f_samp, correction_factor_array, age_lims):
    """Called by pred_samps. Simulates ages of survey participants and data given f."""
    N_samp = len(f_samp)
    N_age_samps = correction_factor_array.shape[1]

    # Get samples for the age distribution at the observation points.
    age_distribution = []
    for i in xrange(N_samp):
        l = age_lims[i]
        age_distribution.append(S_trace[np.random.randint(S_trace.shape[0]), 0, l[0]:l[1]+1])
        age_distribution[-1] /= np.sum(age_distribution[-1])

    # Draw age for each individual, draw an age-correction profile for each location,
    # compute probability of positive for each individual, see how many individuals are
    # positive.
    A = []
    pos = []
    for s in xrange(N_samp):
        A.append(np.array(pm.rcategorical(age_distribution[s], size=N_exam[s]), dtype=int) + age_lims[s][0])
        P_samp = pm.invlogit(f_samp[s].ravel()) * correction_factor_array[:, np.random.randint(N_age_samps)][A[-1]]
        pos.append(pm.rbernoulli(P_samp))

    return A, pos, age_distribution
Author: apatil | Project: mbg-world | Lines: 24 | Source: EP_MAP.py
Example 13: fit_emp_prior
def fit_emp_prior(dm, param_type):
    """ Generate an empirical prior distribution for a single disease parameter

    Parameters
    ----------
    dm : dismod3.DiseaseModel
      The object containing all the data, (hyper)-priors, and additional
      information (like input and output age-mesh).
    param_type : str, one of 'incidence', 'prevalence', 'remission', 'excess-mortality'
      The disease parameter to work with

    Notes
    -----
    The results of this fit are stored in the disease model's params
    hash for use when fitting multiple parameter types together

    Example
    -------
    $ python2.5 gbd_fit.py 175 -t incidence -p 'zero 0 4, zero 41 100, smooth 25'  # takes 7m to run
    """
    data = [d for d in dm.data if clean(d['data_type']).find(param_type) != -1]

    # don't do anything if there is no data for this parameter type
    if len(data) == 0:
        return

    dm.fit_initial_estimate(param_type, data)
    dm.vars = setup(dm, param_type, data)

    # fit the model
    dm.map = mc.MAP(dm.vars)
    try:
        dm.map.fit(method='fmin_powell', iterlim=500, tol=.00001, verbose=1)
    except KeyboardInterrupt:
        print 'User halted optimization routine before optimal value found'

    # save the results in the param_hash
    dm.clear_empirical_prior()
    prior_vals = dict(
        alpha=list(dm.vars['region_coeffs'].value),
        beta=list(dm.vars['study_coeffs'].value),
        gamma=list(dm.vars['age_coeffs'].value),
        sigma=float(dm.vars['dispersion'].value))
    dm.set_empirical_prior(param_type, prior_vals)

    dispersion = prior_vals['sigma']
    for r in dismod3.gbd_regions:
        for y in dismod3.gbd_years:
            for s in dismod3.gbd_sexes:
                key = dismod3.gbd_key_for(param_type, r, y, s)
                logit_mu = predict_logit_rate(regional_covariates(key), **prior_vals)
                mu = mc.invlogit(logit_mu)
                dm.set_initial_value(key, mu)
                dm.set_mcmc('emp_prior_mean', key, mu)
                dm.set_mcmc('emp_prior_lower_ui', key, mc.invlogit(logit_mu - 1.96*dispersion))
                dm.set_mcmc('emp_prior_upper_ui', key, mc.invlogit(logit_mu + 1.96*dispersion))

    key = dismod3.gbd_key_for(param_type, 'world', 1997, 'total')
    logit_mu = predict_logit_rate(regional_covariates(key), **prior_vals)
    mu = mc.invlogit(logit_mu)
    dm.set_initial_value(key, mu)
    dm.set_mcmc('emp_prior_mean', key, mu)
    dm.set_mcmc('emp_prior_lower_ui', key, mc.invlogit(logit_mu - 1.96*dispersion))
    dm.set_mcmc('emp_prior_upper_ui', key, mc.invlogit(logit_mu + 1.96*dispersion))
Author: flaxter | Project: gbd | Lines: 67 | Source: logit_normal_model.py
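A detail worth noting in example 13: the uncertainty interval is built on the logit scale (logit_mu +/- 1.96*sigma) and only then transformed, which guarantees the bounds stay inside (0, 1). The numbers below are illustrative, not from the source:

import numpy as np
import pymc as mc

logit_mu, sigma = -2.0, 0.8
bounds = mc.invlogit(np.array([logit_mu - 1.96 * sigma,
                               logit_mu + 1.96 * sigma]))
# bounds ~ [0.027, 0.394]: asymmetric around invlogit(-2.0) ~ 0.12, but always inside (0, 1)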
Example 14: theta
def theta(a=alpha, b=beta):
    return pymc.invlogit(a + b * x)
Author: jharia | Project: length-estimation-pygame | Lines: 2 | Source: myModel1.py
Example 15: theta
def theta(a=alpha, b=beta):
    """theta = logit^{-1}(a + b*x)"""
    return pymc.invlogit(a + b * x)
Author: balarsen | Project: pymc_learning | Lines: 3 | Source: Example1.py
Example 16: range
# <codecell>

### hyperpriors
d = mc.Normal('d', 0., 1.e-6, value=0.)
tau = mc.Gamma('tau', 1.e-3, 1.e-3, value=1.)
sigma = mc.Lambda('sigma', lambda tau=tau: tau**-.5)
delta_new = mc.Normal('delta_new', d, tau, value=0.)

### priors
mu = [mc.Normal('mu_%d'%i, 0., 1.e-5, value=0.) for i in range(N)]
delta = [mc.Normal('delta_%d'%i, d, tau, value=0.) for i in range(N)]
p_c = mc.Lambda('p_c', lambda mu=mu: mc.invlogit(mu))
p_t = mc.Lambda('p_t', lambda mu=mu, delta=delta: mc.invlogit(array(mu)+delta))

### likelihood
r_c = mc.Binomial('r_c', n_c_obs, p_c, value=r_c_obs, observed=True)
r_t = mc.Binomial('r_t', n_t_obs, p_t, value=r_t_obs, observed=True)

# <markdowncell>

# BUGS uses Gibbs steps automatically, so it only takes 10000 steps of MCMC after a 1000-step burn-in for this model in their example.
#
# PyMC only uses Gibbs steps if you set them up yourself, and it uses Metropolis steps by default. So 10000 steps
# go by more quickly, but the chain takes longer to converge to the stationary distribution.

# <codecell>
Author: greeness | Project: pymc-examples | Lines: 30 | Source: blockers_bayesian_meta-analysis.py
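As the markdown cell in example 16 notes, the default Metropolis steps usually need a longer chain than BUGS' Gibbs sampler. A minimal sketch of how this model might be sampled (variable names follow the cell above; the iteration counts are illustrative, not from the source notebook):

import pymc as mc

# collect the nodes defined above into a sampler; PyMC flattens the nested lists
model = mc.MCMC([d, tau, sigma, delta_new, mu, delta, p_c, p_t, r_c, r_t])
model.sample(iter=50000, burn=10000, thin=10)  # longer run than BUGS' 10000/1000

d_mean = model.stats()['d']['mean']  # posterior mean of the pooled parameter d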
Example 17: generate_disease_data
def generate_disease_data(condition, cov):
    """ Generate csv files with gold-standard disease data,
    and somewhat good, somewhat dense disease data, as might be expected from a
    condition that is carefully studied in the literature
    """
    age_len = dismod3.MAX_AGE
    ages = np.arange(age_len, dtype='float')

    # incidence rate
    i0 = .005 + .02 * mc.invlogit((ages - 44) / 3)
    #i0 = np.maximum(0., .001 * (-.125 + np.ones_like(ages) + (ages / age_len)**2.))

    # remission rate
    #r = 0. * ages
    r = .1 * np.ones_like(ages)

    # excess-mortality rate
    #f_init = .085 * (ages / 100) ** 2.5
    SMR = 3. * np.ones_like(ages) - ages / age_len

    # all-cause mortality-rate
    mort = dismod3.get_disease_model('all-cause_mortality')

    #age_intervals = [[a, a+9] for a in range(0, dismod3.MAX_AGE-4, 10)] + [[0, 100] for ii in range(1)]
    age_intervals = [[a, a] for a in range(0, dismod3.MAX_AGE, 1)]

    # TODO: take age structure from real data
    sparse_intervals = dict([[region, random.sample(age_intervals, (ii**3 * len(age_intervals)) / len(countries_for)**3 / 1)] for ii, region in enumerate(countries_for)])
    dense_intervals = dict([[region, random.sample(age_intervals, len(age_intervals)/2)] for ii, region in enumerate(countries_for)])

    gold_data = []
    noisy_data = []

    for ii, region in enumerate(sorted(countries_for)):
        if region == 'world':
            continue

        print region
        sys.stdout.flush()

        # introduce unexplained regional variation
        #i = i0 * (1 + float(ii) / 21)

        # or not
        i = i0

        for year in [1990, 2005]:
            for sex in ['male', 'female']:
                param_type = 'all-cause_mortality'
                key = dismod3.gbd_key_for(param_type, region, year, sex)
                m_all_cause = mort.mortality(key, mort.data)

                # calculate excess-mortality rate from smr
                f = (SMR - 1.) * m_all_cause

                ## compartmental model (bins S, C, D, M)
                import scipy.linalg
                from dismod3 import NEARLY_ZERO
                from dismod3.utils import trim

                SCDM = np.zeros([4, age_len])
                p = np.zeros(age_len)
                m = np.zeros(age_len)

                SCDM[0,0] = 1.
                SCDM[1,0] = 0.
                SCDM[2,0] = 0.
                SCDM[3,0] = 0.

                p[0] = SCDM[1,0] / (SCDM[0,0] + SCDM[1,0] + NEARLY_ZERO)
                m[0] = trim(m_all_cause[0] - f[0] * p[0], NEARLY_ZERO, 1-NEARLY_ZERO)

                for a in range(age_len - 1):
                    A = [[-i[a]-m[a], r[a]           , 0., 0.],
                         [ i[a]     , -r[a]-m[a]-f[a], 0., 0.],
                         [ m[a]     , m[a]           , 0., 0.],
                         [ 0.       , f[a]           , 0., 0.]]

                    SCDM[:,a+1] = np.dot(scipy.linalg.expm(A), SCDM[:,a])

                    p[a+1] = SCDM[1,a+1] / (SCDM[0,a+1] + SCDM[1,a+1] + NEARLY_ZERO)
                    m[a+1] = m_all_cause[a+1] - f[a+1] * p[a+1]

                # duration = E[time in bin C]
                hazard = r + m + f
                pr_not_exit = np.exp(-hazard)
                X = np.empty(len(hazard))
                X[-1] = 1 / hazard[-1]
                for ii in reversed(range(len(X)-1)):
                    X[ii] = (pr_not_exit[ii] * (X[ii+1] + 1)) + (1 / hazard[ii] * (1 - pr_not_exit[ii]) - pr_not_exit[ii])

                country = countries_for[region][0]
                params = dict(age_intervals=age_intervals, condition=condition, gbd_region=region,
                              country=country, year=year, sex=sex, effective_sample_size=1000)
                params['age_intervals'] = [[0,99]]
#......... (remainder of this function omitted) .........
Author: flaxter | Project: gbd | Lines: 101 | Source: good_dense_data.py
Example 18: f
def f(sp_sub, a, b, n=n):
    p = pm.invlogit(sp_sub)
    h = pm.rbeta(a, b, size=len(sp_sub))
    p_def = g6pd.p_fem_def(p, h)
    return pm.rbinomial(n=n, p=p)
Author: malaria-atlas-project | Project: g6pd | Lines: 5 | Source: __init__.py
Example 19: theta
def theta(a=alpha, b=beta, d=dose):
    """theta = inv_logit(a + b*d)"""
    return pm.invlogit(a + b*d)
Author: GunioRobot | Project: pymc | Lines: 3 | Source: modelchecking.py
Example 20: this_fun
def this_fun(x, p2=p2, p3=p3, negi=negi, posi=posi, Ai=Ai):
    p1 = np.log(invlogit(x))
    return p1*spi + p3 + cfh(p1, p2, negi)
Author: apatil | Project: mbg-world | Lines: 3 | Source: correction_factors.py
Note: The pymc.invlogit examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution and use are governed by each project's license. Do not reproduce without permission.