This article collects typical usage examples of the pymultinest.run function in Python. If you have been wondering what exactly the run function does, how to call it, or what real-world uses look like, the hand-picked code examples below may help.
Twenty code examples of the run function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
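All of the examples below share the same calling convention: pymultinest.run(loglike, prior, n_params, ...) expects a prior callback that maps the unit hypercube in place to physical parameter values, and a log-likelihood callback that returns a float for the transformed point, with results written to files under outputfiles_basename. Before the project-specific examples, here is a minimal sketch of that pattern; the Gaussian toy problem and the chains/demo_ prefix are illustrative choices, not taken from any of the projects below.

import os
import pymultinest

# Prior transform: overwrite each unit-cube coordinate with a physical value.
def prior(cube, ndim, nparams):
    for i in range(ndim):
        cube[i] = cube[i] * 10.0 - 5.0          # uniform prior on [-5, 5]

# Log-likelihood: return a float for the (already transformed) parameter vector.
def loglike(cube, ndim, nparams):
    return -0.5 * sum(cube[i] ** 2 for i in range(ndim))   # toy Gaussian likelihood

n_params = 2
os.makedirs('chains', exist_ok=True)            # MultiNest needs the output directory to exist
pymultinest.run(loglike, prior, n_params,
                outputfiles_basename='chains/demo_',
                n_live_points=400, resume=False, verbose=True)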
Example 1: main

def main():
    # Begin timing the estimation process
    start_time = time.time()

    # Run the MultiNest software
    pmn.run(binary_logit_log_likelihood, uniform_prior, num_dimensions,
            outputfiles_basename=relative_output_folder,
            n_live_points=num_live_points,
            sampling_efficiency=desired_sampling_efficiency,
            log_zero=-1e200,
            mode_tolerance=-1e180,
            null_log_evidence=-1e180,
            resume=False, verbose=True, init_MPI=False)

    # Record the ending time of the estimation process
    end_time = time.time()
    tot_minutes = (end_time - start_time) / 60.0

    # Save the parameter names
    with open(relative_output_folder + "parameter_names.json", 'w') as f:
        json.dump(explanatory_vars, f)

    # Save the number of live points used and the total estimation time
    model_run_params = {"n_live_points": num_live_points,
                        "sampling_efficiency": desired_sampling_efficiency,
                        "estimation_minutes": tot_minutes}
    with open(relative_output_folder + "model_run_parameters.json", "w") as f:
        json.dump(model_run_params, f)

    # Print a report on how long the estimation process took
    print("Estimation process took {:.2f} minutes".format(tot_minutes))

Author: timothyb0912 | Project: first | Lines: 31 | Source: test_binary_logit_mpi.py
Example 2: multinest

def multinest(parameter_names, transform, loglikelihood, output_basename, **problem):
    parameters = parameter_names
    n_params = len(parameters)

    def myprior(cube, ndim, nparams):
        params = transform([cube[i] for i in range(ndim)])
        for i in range(ndim):
            cube[i] = params[i]

    def myloglike(cube, ndim, nparams):
        return loglikelihood([cube[i] for i in range(ndim)])

    # run MultiNest
    mn_args = dict(
        outputfiles_basename=output_basename,
        resume=problem.get('resume', False),
        verbose=True,
        n_live_points=problem.get('n_live_points', 400))
    if 'seed' in problem:
        mn_args['seed'] = problem['seed']
    pymultinest.run(myloglike, myprior, n_params, **mn_args)

    import json
    # store the parameter names, always useful
    with open('%sparams.json' % output_basename, 'w') as f:
        json.dump(parameters, f, indent=2)

    # analyse
    a = pymultinest.Analyzer(n_params=n_params,
                             outputfiles_basename=output_basename)
    s = a.get_stats()
    with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
        json.dump(s, f, indent=2)
    return a

Author: JohannesBuchner | Project: syscorr | Lines: 34 | Source: __init__.py
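A hypothetical call to this wrapper could look as follows; the transform and loglikelihood functions and the output prefix are illustrative and not part of the original project:

def transform(u):
    # map the unit cube to physical values: x in [0, 10], y in [-1, 1]
    return [u[0] * 10.0, u[1] * 2.0 - 1.0]

def loglikelihood(params):
    x, y = params
    return -0.5 * (x ** 2 + y ** 2)

# the 'chains/' directory must already exist, since the wrapper does not create it
analyzer = multinest(['x', 'y'], transform, loglikelihood, 'chains/demo_',
                     n_live_points=400, seed=1)
print(analyzer.get_stats()['nested sampling global log-evidence'])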
Example 3: test
def test():
    test.prior_was_called = False
    test.loglike_was_called = False
    test.dumper_was_called = False

    def myprior(cube, ndim, nparams):
        for i in range(ndim):
            cube[i] = cube[i] * 10 * math.pi
        test.prior_was_called = True

    def myloglike(cube, ndim, nparams):
        chi = 1.
        for i in range(ndim):
            chi *= math.cos(cube[i] / 2.)
        test.loglike_was_called = True
        return math.pow(2. + chi, 5)

    def mydumper(nSamples, nlive, nPar,
                 physLive, posterior, paramConstr,
                 maxLogLike, logZ, logZerr, nullcontext):
        print("calling dumper")
        test.dumper_was_called = True

    # number of dimensions our problem has
    parameters = ["x", "y"]
    n_params = len(parameters)

    # run MultiNest
    pymultinest.run(myloglike, myprior, n_params,
                    resume=True, verbose=True,
                    dump_callback=mydumper)
    assert test.prior_was_called
    assert test.loglike_was_called
    assert test.dumper_was_called
Author: JohannesBuchner | Project: PyMultiNest | Lines: 34 | Source: pymultinest_minimal_test.py
Example 4: fit_multinest
def fit_multinest(self, n_live_points=1000, basename="chains/1-", verbose=True, overwrite=True, **kwargs):
    self._mnest_basename = basename

    # creates the directory for the output
    # folder = os.path.abspath(os.path.dirname(self._mnest_basename))
    # if not os.path.exists(self._mnest_basename):
    #     os.makedirs(self._mnest_basename)

    if hasattr(self, "which"):
        self.n_params = 9 + 6 * self.lc.n_planets
    else:
        self.n_params = 5 + 6 * self.lc.n_planets

    pymultinest.run(
        self.mnest_loglike,
        self.mnest_prior,
        self.n_params,
        n_live_points=n_live_points,
        outputfiles_basename=self._mnest_basename,
        verbose=verbose,
        **kwargs
    )
    self._make_samples()
Author: NicholasBermuda | Project: transit-fitting | Lines: 25 | Source: fitter.py
Example 5: multinest
def multinest(optimizer, nprocs=1):
    # number of dimensions our problem has
    parameters = ["{0}".format(i)
                  for i in range(len(optimizer.params.get_all(True)))]
    nparams = len(parameters)

    if not os.path.exists('chains'):
        os.mkdir('chains')

    def lnprior(cube, ndim, nparams):
        theta = np.array([cube[i] for i in range(ndim)])
        for i in range(len(optimizer.params.get_all(True))):
            param = optimizer.params.get_all(True)[i]
            if "mass_" in param.name:
                theta[i] = 10 ** (theta[i] * 8 - 9)
            elif "radius_" in param.name:
                theta[i] = 10 ** (theta[i] * 4 - 4)
            elif "flux_" in param.name:
                theta[i] = 10 ** (theta[i] * 4 - 4)
            elif "a_" in param.name:
                theta[i] = 10 ** (theta[i] * 2 - 2)
            elif "e_" in param.name:
                theta[i] = 10 ** (theta[i] * 3 - 3)
            elif "inc_" in param.name:
                theta[i] *= 2.0 * np.pi
            elif "om_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 2 - 2)
            elif "ln_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 8 - 8)
            elif "ma_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 2 - 2)
        for i in range(ndim):
            cube[i] = theta[i]

    def lnlike(cube, ndim, nparams):
        theta = np.array([cube[i] for i in range(ndim)])
        optimizer.params.update(theta)
        mod_flux, mod_rv = optimizer.model(nprocs)
        flnl = -(0.5 * ((mod_flux - optimizer.photo_data[1]) /
                        optimizer.photo_data[2]) ** 2)
        rvlnl = -(0.5 * ((mod_rv - optimizer.rv_data[1]) /
                         optimizer.rv_data[2]) ** 2)
        tlnl = np.sum(flnl) + np.sum(rvlnl)
        nobj = np.append(np.sum(flnl) + np.sum(rvlnl), theta)
        optimizer.chain = np.vstack([optimizer.chain, nobj])
        if tlnl > optimizer.maxlnp:
            optimizer.iterout(tlnl, theta, mod_flux)
        return np.sum(flnl) + np.sum(rvlnl)

    # run MultiNest
    pymultinest.run(lnlike, lnprior, nparams, n_live_points=1000)
Author: nmearl | Project: pynamic | Lines: 59 | Source: optimizers.py
Example 6: main
def main():
    cube = [0.9, 0.5, 0.1]  # initial values not used
    ndim = len(cube)
    nparams = len(cube)
    os.chdir('/home/jordi/allst/sample')
    pm.run(F_calc_Likelihood4stmd, F_allpriors, nparams,
           importance_nested_sampling=False, resume=False, verbose=True,
           n_live_points=32, outputfiles_basename="DMco_",
           sampling_efficiency=0.02, const_efficiency_mode=True, init_MPI=False)
Author: queise | Project: Bayes_Stars_SepCalc | Lines: 10 | Source: allst_main.py
Example 7: run_multinest

def run_multinest(posterior, save_file):
    """Uses the MultiNest sampler to calculate the evidence instead of emcee.
    posterior is a posterior object; it must have prior and lik methods.
    save_file is the output path; the run will resume from it if it already exists."""
    # checks: create the output directory if it does not exist
    if not os.path.exists(save_file) and mpi.COMM_WORLD.rank == 0:
        os.mkdir(save_file)
    assert hasattr(posterior, 'prior') and hasattr(posterior, 'lik'), 'must have prior and lik methods'
    # run sampler
    pymultinest.run(posterior.lik, posterior.prior, posterior.get_dim(),
                    outputfiles_basename=save_file)

Author: drdangersimon | Project: mcmc_phylo | Lines: 13 | Source: mcmc_lik.py
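The posterior object passed to run_multinest only needs get_dim, prior and lik methods with MultiNest-style signatures. A hypothetical minimal implementation is sketched below; the toy Gaussian problem and the chains_toy/ path are illustrative, not part of the original project:

class ToyPosterior(object):
    """Minimal object satisfying the interface run_multinest assumes."""

    def __init__(self, ndim=2):
        self.ndim = ndim

    def get_dim(self):
        return self.ndim

    def prior(self, cube, ndim, nparams):
        # transform the unit cube in place to a uniform prior on [-10, 10]
        for i in range(ndim):
            cube[i] = cube[i] * 20.0 - 10.0

    def lik(self, cube, ndim, nparams):
        return -0.5 * sum(cube[i] ** 2 for i in range(ndim))

run_multinest(ToyPosterior(), 'chains_toy/')   # output files land in chains_toy/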
Example 8: generate

def generate(lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname):
    global mod_pars, params, photo_data, rv_data, ncores, fname
    mod_pars, params, photo_data, rv_data, ncores, fname = \
        lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname

    # number of dimensions our problem has
    parameters = ["{0}".format(i) for i in range(mod_pars[0] * 5 + (mod_pars[0] - 1) * 6)]
    nparams = len(parameters)

    # make sure the output directories exist
    if not os.path.exists("./output/{0}/multinest".format(fname)):
        os.makedirs(os.path.join("./", "output", "{0}".format(fname), "multinest"))
    if not os.path.exists("./output/{0}/plots".format(fname)):
        os.makedirs(os.path.join("./", "output", "{0}".format(fname), "plots"))
    if not os.path.exists("chains"):
        os.makedirs("chains")

    # we want to see some output while it is running
    progress_plot = pymultinest.ProgressPlotter(n_params=nparams,
                                                outputfiles_basename='output/{0}/multinest/'.format(fname))
    progress_plot.start()
    # progress_print = pymultinest.ProgressPrinter(n_params=nparams, outputfiles_basename='output/{0}/multinest/'.format(fname))
    # progress_print.start()

    # run MultiNest
    pymultinest.run(lnlike, lnprior, nparams, outputfiles_basename=u'./output/{0}/multinest/'.format(fname),
                    resume=True, verbose=True,
                    sampling_efficiency='parameter', n_live_points=1000)

    # run has completed
    progress_plot.stop()
    # progress_print.stop()
    json.dump(parameters, open('./output/{0}/multinest/params.json'.format(fname), 'w'))  # save parameter names

    # plot the distribution of a posteriori possible models
    plt.figure()
    plt.plot(photo_data[0], photo_data[1], '+', color='red', label='data')
    a = pymultinest.Analyzer(outputfiles_basename="./output/{0}/reports/".format(fname), n_params=nparams)
    for theta in a.get_equal_weighted_posterior()[::100, :-1]:
        params = utilfuncs.split_parameters(theta, mod_pars[0])
        mod_flux, mod_rv = utilfuncs.model(mod_pars, params, photo_data[0], rv_data[0])
        plt.plot(photo_data[0], mod_flux, '-', color='blue', alpha=0.3, label='model')

    utilfuncs.report_as_input(params, fname)
    plt.savefig('./output/{0}/plots/posterior.pdf'.format(fname))
    plt.close()

Author: nmearl | Project: pynamic-old | Lines: 51 | Source: multinest.py
Example 9: perform_scan_multinest
def perform_scan_multinest(self, chains_dir, nlive=100):
    """ Perform a scan with MultiNest """
    self.make_dirs([chains_dir])
    n_params = len(self.floated_params)
    pymultinest_options = {'importance_nested_sampling': False,
                           'resume': False, 'verbose': True,
                           'sampling_efficiency': 'model',
                           'init_MPI': False, 'evidence_tolerance': 0.5,
                           'const_efficiency_mode': False}

    pymultinest.run(self.ll, self.prior_cube, n_params,
                    outputfiles_basename=chains_dir,
                    n_live_points=nlive, **pymultinest_options)
Author: bsafdi | Project: GCE-2FIG | Lines: 14 | Source: run.py
Example 10: main

def main():
    """
    """
    # Set up MPI variables
    world = MPI.COMM_WORLD
    rank = world.rank
    size = world.size
    master = rank == 0

    if master:
        print("Runtime parameters")
        pprint.pprint(rp)
        time.sleep(2)

    if not os.path.exists(rp["outdir"]):
        try:
            os.mkdir(rp["outdir"])
        except:
            pass

    n_params = rp["nc_fit"] + 3

    # progress = pymultinest.ProgressPlotter(n_params=n_params, interval_ms=10000,
    #                                        outputfiles_basename=rp["outputfiles_basename"])
    # progress.start()

    pymultinest.run(loglike, logprior, n_params, resume=False, verbose=True,
                    multimodal=rp["multimodal"], max_modes=rp["max_modes"], write_output=True,
                    n_live_points=rp["n_live_points"],
                    evidence_tolerance=rp["evidence_tolerance"],
                    mode_tolerance=rp["mode_tolerance"],
                    seed=rp["seed"],
                    max_iter=rp["max_iter"],
                    importance_nested_sampling=rp["do_ins"],
                    outputfiles_basename=rp["outputfiles_basename"],
                    init_MPI=False)

    if master:
        # Copy the config.ini file to the output dir
        shutil.copy(param_file, rp["outdir"])

    # progress.stop()
    return 0

Author: ska-sa | Project: hibayes | Lines: 45 | Source: hi_multinest.py
Example 11: multinest
def multinest(self, *args, **kwargs):
    import pymultinest
    # res = self.fit(False,True)
    # f = open("calls.txt","w+")
    self.freeParameters = self.modelManager.getFreeParameters()

    def prior(cube, ndim, nparams):
        for i, p in enumerate(self.freeParameters.values()):
            cube[i] = p.prior.multinestCall(cube[i])

    def loglike(cube, ndim, nparams):
        logL = self.minusLogLike(cube) * (-1)
        if (numpy.isnan(logL)):
            logL = -1e10
        # f.write(" ".join(map(lambda x: "%s" % x, cube[:ndim])))
        # f.write(" %s\n" % logL)
        return logL

    if ('verbose' not in kwargs):
        kwargs['verbose'] = True
    if ('resume' not in kwargs):
        kwargs['resume'] = False
    if ('outputfiles_basename' not in kwargs):
        kwargs['outputfiles_basename'] = '_1_'
    kwargs['log_zero'] = -1e9

    pymultinest.run(loglike, prior, len(self.freeParameters), *args, **kwargs)
    print("done")

    # Collect the samples
    analyzer = pymultinest.Analyzer(n_params=len(self.freeParameters),
                                    outputfiles_basename=kwargs['outputfiles_basename'])
    eqw = analyzer.get_equal_weighted_posterior()
    self.samples = eqw[:, :-1]
    self.posteriors = eqw[:, -1]
Author: cdr397 | Project: 3ML | Lines: 41 | Source: jointLikelihood.py
Example 12: run

def run(gp):
    pymultinest.run(myloglike, myprior, gp.ndim, n_params=gp.ndim + 1,
                    n_clustering_params=gp.nrho,      # gp.ndim, or separate modes on the rho parameters only: gp.nrho
                    wrapped_params=[gp.pops, gp.nipol, gp.nrho],
                    importance_nested_sampling=False, # INS disabled
                    multimodal=False,                 # do not separate modes
                    const_efficiency_mode=True,       # use const sampling efficiency
                    n_live_points=gp.nlive,
                    evidence_tolerance=0.0,           # 0 to keep the algorithm working indefinitely
                    sampling_efficiency=0.05,         # MultiNest README recommends 0.05 for >30 params
                    n_iter_before_update=2,           # output after this many iterations
                    null_log_evidence=-1e100,
                    max_modes=gp.nlive,               # preallocation of modes: max = number of live points
                    mode_tolerance=-1.e100,           # mode tolerance when no special value exists: highly negative
                    outputfiles_basename=gp.files.outdir,
                    seed=-1, verbose=True,
                    resume=gp.restart,
                    context=0, write_output=True,
                    log_zero=-1e500,                  # points with log-likelihood < log_zero will be neglected
                    max_iter=0,                       # 0 means no stopping criterion based on the number of iterations
                    init_MPI=False, dump_callback=None)

Author: PascalSteger | Project: darcoda | Lines: 21 | Source: gravimage.py
Example 13: run

def run():
    import gl_file as gfile
    if gp.getnewdata:
        gfile.bin_data()
    gfile.get_data()

    ## number of dimensions
    n_dims = gp.nepol + gp.pops * gp.nepol + gp.pops * gp.nbeta  # rho, (nu, beta)_i
    parameters = stringlist(gp.pops, gp.nepol)

    # show live progress
    # progress = pymultinest.ProgressPlotter(n_params = n_dims)
    # progress.start()
    # threading.Timer(2, show, [gp.files.outdir+'/phys_live.points.pdf']).start()
    # print(str(len(gp.files.outdir))+': len of gp.files.outdir')

    pymultinest.run(myloglike, myprior,
                    n_dims, n_params=n_dims,          # None beforehand
                    n_clustering_params=gp.nepol,     # separate modes on the rho parameters only
                    wrapped_params=None,              # do not wrap-around parameters
                    importance_nested_sampling=True,  # INS enabled
                    multimodal=True,                  # separate modes
                    const_efficiency_mode=True,       # use const sampling efficiency
                    n_live_points=gp.nlive,
                    evidence_tolerance=0.0,           # set to 0 to keep the algorithm working indefinitely
                    sampling_efficiency=0.80,
                    n_iter_before_update=gp.nlive,    # output after this many iterations
                    null_log_evidence=-1,             # separate modes if log-evidence > this value
                    max_modes=gp.nlive,               # preallocation of modes: maximum = number of live points
                    mode_tolerance=-1.,
                    outputfiles_basename=gp.files.outdir,
                    seed=-1,
                    verbose=True,
                    resume=False,
                    context=0,
                    write_output=True,
                    log_zero=-1e6,
                    max_iter=10000000,
                    init_MPI=True,
                    dump_callback=None)

Author: sofiasi | Project: darcoda | Lines: 40 | Source: gravlite.py
Example 14: run
def run(self, clean_up=None, **kwargs):
    if clean_up is None:
        if self.run_dir is None:
            clean_up = True
        else:
            clean_up = False

    if self.run_dir is None:
        run_dir = tempfile.mkdtemp()
    else:
        run_dir = self.run_dir

    basename = self.prepare_fit_directory(run_dir, self.prefix)

    start_time = time.time()
    logger.info('Starting fit in {0} with prefix {1}'.format(run_dir, self.prefix))
    pymultinest.run(self.likelihood.multinest_evaluate, self.priors.prior_transform,
                    self.n_params,
                    outputfiles_basename='{0}_'.format(basename),
                    **kwargs)
    logger.info("Fit finished - took {0:.2f} s".format(time.time() - start_time))

    fitted_parameter_names = [item for item in self.likelihood.param_names
                              if not self.likelihood.fixed[item]]
    self.result = MultiNestResult.from_multinest_basename(
        basename, fitted_parameter_names)

    if clean_up:
        logger.info("Cleaning up - deleting {0}".format(run_dir))
        shutil.rmtree(run_dir)
    else:
        logger.info("Multinest files can be found in {0}".format(run_dir))

    self.likelihood.parameters[~self.likelihood.fixed_mask()] = (
        self.result.median.values)

    return self.result
Author: specgrid | Project: starkit | Lines: 40 | Source: base.py
Example 15: run
def run(self, clean_up=None, **kwargs):
    if clean_up is None:
        if self.run_dir is None:
            clean_up = True
        else:
            clean_up = False

    if self.run_dir is None:
        run_dir = tempfile.mkdtemp()
    else:
        run_dir = self.run_dir

    basename = self.prepare_fit_directory(run_dir, self.prefix)

    start_time = time.time()
    logger.info('Starting fit in {0} with prefix {1}'.format(run_dir, self.prefix))
    pymultinest.run(self.likelihood, self.priors.prior_transform,
                    self.n_params,
                    outputfiles_basename='{0}_'.format(basename),
                    **kwargs)
    logger.info("Fit finished - took {0:.2f} s".format(time.time() - start_time))

    self.result = MultinestResult.from_multinest_basename(
        basename, self.likelihood.param_names)

    if clean_up:
        logger.info("Cleaning up - deleting {0}".format(run_dir))
        shutil.rmtree(run_dir)

    return self.result
Author: specgrid | Project: specgrid | Lines: 36 | Source: multinest_fitter.py
Example 16: len

print("\n You are searching for the following parameters: {0}\n".format(parameters))
n_params = len(parameters)
print("\n The total number of parameters is {0}\n".format(n_params))

#####################
# Now, we sample.....
#####################
print("\n Now, we sample... \n")

if args.eccSearch:
    dirextension = 'eccSearch'
else:
    dirextension = 'circSearch'

master_path = os.getcwd()
os.chdir(args.datapath)

pymultinest.run(lnprob, my_prior_mnest, n_params,
                importance_nested_sampling=False,
                resume=False, verbose=True, n_live_points=1000,
                outputfiles_basename=u'chains_{0}/{0}_'.format(dirextension),
                sampling_efficiency='parameter')

Author: jellis18 | Project: NX01 | Lines: 23 | Source: eccentric_BayesMnest_NanoSets.py
Example 17: run

def run(catalogfile, vel_err, mag_err, N_gauss, outdir, rotate=True):
    """
    PyMultiNest run to determine cluster membership, using the PM catalog and
    applying vel_err and mag_err cuts. Output is put in the newly-created outdir
    directory (must be a string).

    Parameters:
    catalogfile --> String containing the name of a FITS catalog.
    vel_err --> The maximum allowed velocity error for stars to be included.
    mag_err --> The maximum allowed magnitude error for stars to be included.
    N_gauss --> Number of bivariate Gaussians, where N_gauss <= 4.
    outdir --> The output directory name.

    Keywords:
    rotate = 1 --> rotate star velocities into RA/DEC format, as opposed
    to X,Y
    """
    # Load data for full field, extract velocities (already converted to mas)
    d = loadData(catalogfile, vel_err, mag_err, rotate=rotate)

    star_Vx = d["fit_vx"]
    star_Vy = d["fit_vy"]
    star_Sigx = d["fit_vxe"]
    star_Sigy = d["fit_vye"]
    N_stars = len(d)

    def print_param(pname, val, logp, headerFirst=False):
        rowHead = "{0:6s} "
        colHead = " val_{0} ( logp_{0} )"
        colVal = "{0:6.3f} ({1:9.2e})"
        if headerFirst:
            outhdr = " ".join([colHead.format(k) for k in range(N_gauss)])
            print(rowHead.format("") + outhdr)
        outstr = " ".join([colVal.format(val[k], logp[k]) for k in range(N_gauss)])
        print(rowHead.format(pname) + outstr)
        return

    def priors(cube, ndim, nparams):
        return

    def likelihood(cube, ndim, nparams):
        """
        Define the likelihood function (from Clarkson+12, Hosek+15)
        """
        # start the timer
        t0 = time.time()

        ####################
        # Set up model params
        ####################
        # Number of parameters per Gaussian:
        N_per_gauss = 6

        # Make arrays for the parameters of each Gaussian
        pi = np.arange(N_gauss, dtype=float)
        vx = np.arange(N_gauss, dtype=float)
        vy = np.arange(N_gauss, dtype=float)
        sigA = np.arange(N_gauss, dtype=float)
        sigB = np.arange(N_gauss, dtype=float)
        theta = np.arange(N_gauss, dtype=float)

        # Make arrays for the prior probability of each parameter
        logp_pi = np.arange(N_gauss, dtype=float)
        logp_vx = np.arange(N_gauss, dtype=float)
        logp_vy = np.arange(N_gauss, dtype=float)
        logp_sigA = np.arange(N_gauss, dtype=float)
        logp_sigB = np.arange(N_gauss, dtype=float)
        logp_theta = np.arange(N_gauss, dtype=float)

        # Set the fraction of stars in each Gaussian
        for kk in range(N_gauss):
            pi[kk], logp_pi[kk] = random_pi(cube[kk * N_per_gauss + 0])

        # Make sure that sum(pi) = 1.
        pi /= pi.sum()

        # Sort the field pi values such that they are always ranked from
        # smallest to largest.
        sidx = pi[1:].argsort()
        pi[1:] = pi[1:][sidx]
        logp_pi[1:] = logp_pi[1:][sidx]

        # Re-set the cube values. Note this is AFTER sorting.
        for kk in range(N_gauss):
            cube[kk * N_per_gauss + 0] = pi[kk]

        # Set the other Gaussian parameters.
        for kk in range(N_gauss):
            # Treat the cluster Gaussian (the first, most compact one)
            # with a special prior function.
            if kk == 0:
                rand_vx = random_clust_vx
                rand_vy = random_clust_vy
                rand_sigA = random_clust_sigA
                rand_sigB = random_clust_sigB
                rand_theta = random_clust_theta
# ... (the remainder of the code is omitted here) ...

Author: AtomyChan | Project: JLU-python-code | Lines: 101 | Source: membership.py
Example 18: range

# (This excerpt begins inside the log-likelihood function of the PyMultiNest demo;
# the enclosing signature below is reconstructed, and myprior is defined as in Example 3.)
def myloglike(cube, ndim, nparams):
    chi = 1.
    # print("cube", [cube[i] for i in range(ndim)], cube)
    for i in range(ndim):
        chi *= math.cos(cube[i] / 2.)
    # print("returning", math.pow(2. + chi, 5))
    return math.pow(2. + chi, 5)

# number of dimensions our problem has
parameters = ["x", "y"]
n_params = len(parameters)

# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params=n_params, outputfiles_basename='chains/2-')
progress.start()
threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start()  # delayed opening

# run MultiNest
pymultinest.run(myloglike, myprior, n_params, importance_nested_sampling=False, resume=True,
                verbose=True, sampling_efficiency='model', n_live_points=1000,
                outputfiles_basename='chains/2-')

# ok, done. Stop our progress watcher
progress.stop()

# lets analyse the results
a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename='chains/2-')
s = a.get_stats()

import json
# store name of parameters, always useful
with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
    json.dump(parameters, f, indent=2)
# store derived stats
with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
    json.dump(s, f, indent=2)
print()

Author: followthesheep | Project: PyMultiNest | Lines: 31 | Source: pymultinest_demo.py
Example 19: dump
def dump():
    progress = pymultinest.ProgressPlotter(n_params=n_params, outputfiles_basename=dir_output + 'chains/2-')
    progress.start()
    threading.Timer(2, show, [dir_output + "chains/2-phys_live.points.pdf"]).start()  # delayed opening

    # run MultiNest
    pymultinest.run(mc.multinest_call, mc.multinest_priors, n_params, importance_nested_sampling=False, resume=True,
                    verbose=True, sampling_efficiency='model', n_live_points=1000,
                    outputfiles_basename=dir_output + 'chains/2-')

    # ok, done. Stop our progress watcher
    progress.stop()

    # lets analyse the results
    a = pymultinest.Analyzer(n_params=n_params, outputfiles_basename=dir_output + 'chains/2-')
    s = a.get_stats()

    # store name of parameters, always useful
    with open('%sparams.json' % a.outputfiles_basename, 'w') as f:
        json.dump(parameters, f, indent=2)
    # store derived stats
    with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
        json.dump(s, f, indent=2)

    print()
    print("-" * 30, 'ANALYSIS', "-" * 30)
    print("Global Evidence:\n\t%.15e +- %.15e" % (s['nested sampling global log-evidence'],
                                                  s['nested sampling global log-evidence error']))

    import matplotlib.pyplot as plt
    plt.clf()

    # run MultiNest
    # pymultinest.run(mc.pymultinest_call, mc.pymultinest_priors, mc.ndim, outputfiles_basename=dir_output, resume = False, verbose = True)
    # json.dump(parameters, open(dir_output+'params.json', 'w'))  # save parameter names

    # Here we will plot all the marginals and whatnot, just to show off
    # You may configure the format of the output here, or in matplotlibrc
    # All pymultinest does is filling in the data of the plot.
    # Copy and edit this file, and play with it.
    p = pymultinest.PlotMarginalModes(a)
    plt.figure(figsize=(5 * n_params, 5 * n_params))
    # plt.subplots_adjust(wspace=0, hspace=0)
    for i in range(n_params):
        plt.subplot(n_params, n_params, n_params * i + i + 1)
        p.plot_marginal(i, with_ellipses=True, with_points=False, grid_points=50)
        plt.ylabel("Probability")
        plt.xlabel(parameters[i])
        for j in range(i):
            plt.subplot(n_params, n_params, n_params * j + i + 1)
            # plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
            p.plot_conditional(i, j, with_ellipses=False, with_points=True, grid_points=30)
            plt.xlabel(parameters[i])
            plt.ylabel(parameters[j])

    plt.savefig(dir_output + "chains/marginals_multinest.pdf")  # , bbox_inches='tight')
    show(dir_output + "chains/marginals_multinest.pdf")

    for i in range(n_params):
        outfile = '%s-mode-marginal-%d.pdf' % (a.outputfiles_basename, i)
        p.plot_modes_marginal(i, with_ellipses=True, with_points=False)
        plt.ylabel("Probability")
        plt.xlabel(parameters[i])
        plt.savefig(outfile, format='pdf', bbox_inches='tight')
        plt.close()

        outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename, i)
        p.plot_modes_marginal(i, cumulative=True, with_ellipses=True, with_points=False)
        plt.ylabel("Cumulative probability")
        plt.xlabel(parameters[i])
        plt.savefig(outfile, format='pdf', bbox_inches='tight')
        plt.close()

    print("Take a look at the pdf files in chains/")
Author: LucaMalavolta | Project: PyORBIT | Lines: 72 | Source: PyORBIT_V3_MultiNest.py
Example 20: main

def main():
    # MAIN -- TRADES + pyMultiNest
    # ---
    # initialize logger
    logger = logging.getLogger("Main_log")
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(message)s")

    # READ COMMAND LINE ARGUMENTS
    cli = get_args()

    # STARTING TIME
    start = time.time()

    # RENAME
    working_path = cli.full_path
    # nthreads = cli.nthreads

    log_file = os.path.join(working_path, '%s_log.txt' % (os.path.dirname(cli.sub_folder)))
    flog = logging.FileHandler(log_file, 'w')
    flog.setLevel(logging.DEBUG)
    flog.setFormatter(formatter)
    logger.addHandler(flog)
    # log to screen
    slog = logging.StreamHandler()
    slog.setLevel(logging.DEBUG)
    slog.setFormatter(formatter)
    logger.addHandler(slog)

    fitting_priors, fitting_priors_type = read_priors(os.path.join(working_path, 'fitting_priors.dat'))
    derived_priors, derived_priors_type = read_priors(os.path.join(working_path, 'derived_priors.dat'))
    n_der_priors = len(derived_priors)

    # INITIALISE TRADES WITH SUBROUTINE WITHIN TRADES_LIB -> PARAMETER NAMES, MINMAX, INTEGRATION ARGS, READ DATA ...
    pytrades_lib.pytrades.initialize_trades(working_path, cli.sub_folder, 1)

    # RETRIEVE DATA AND VARIABLES FROM TRADES_LIB MODULE
    fitting_parameters = pytrades_lib.pytrades.fitting_parameters  # INITIAL PARAMETER SET (NEEDED ONLY TO HAVE THE PROPER ARRAY/VECTOR)
    parameters_minmax = pytrades_lib.pytrades.parameters_minmax  # PARAMETER BOUNDARIES
    delta_parameters = np.abs(parameters_minmax[:, 1] - parameters_minmax[:, 0])  # DELTA BETWEEN MAX AND MIN OF BOUNDARIES

    n_bodies = pytrades_lib.pytrades.n_bodies  # NUMBER OF TOTAL BODIES OF THE SYSTEM
    n_planets = n_bodies - 1  # NUMBER OF PLANETS IN THE SYSTEM
    ndata = pytrades_lib.pytrades.ndata  # TOTAL NUMBER OF DATA AVAILABLE
    npar = pytrades_lib.pytrades.npar  # NUMBER OF TOTAL PARAMETERS ~ n_planets x 6
    nfit = pytrades_lib.pytrades.nfit  # NUMBER OF PARAMETERS TO FIT
    nfree = pytrades_lib.pytrades.nfree  # NUMBER OF FREE PARAMETERS (i.e. nrvset)
    dof = pytrades_lib.pytrades.dof  # NUMBER OF DEGREES OF FREEDOM = NDATA - NFIT

    global inv_dof
    # inv_dof = np.float64(1.0 / dof)
    inv_dof = pytrades_lib.pytrades.inv_dof

    str_len = pytrades_lib.pytrades.str_len
    temp_names = pytrades_lib.pytrades.get_parameter_names(nfit, str_len)
    trades_names = anc.convert_fortran_charray2python_strararray(temp_names)
    parameter_names = anc.trades_names_to_emcee(trades_names)

    # RADIAL VELOCITIES SET
    n_rv = pytrades_lib.pytrades.nrv
    n_set_rv = pytrades_lib.pytrades.nrvset

    # TRANSITS SET
    n_t0 = pytrades_lib.pytrades.nt0
    n_t0_sum = pytrades_lib.pytrades.ntts
    n_set_t0 = 0
    for i in range(0, n_bodies - 1):
        if (n_t0[i] > 0):
            n_set_t0 += 1

    # compute the global constant for the log-likelihood
    global ln_err_const
    # try:
    #     e_RVo = np.asarray(pytrades_lib.pytrades.ervobs[:], dtype=np.float64)  # fortran variable RV in python will be rv!!!
    # except:
    #     e_RVo = np.asarray([0.], dtype=np.float64)
    # try:
    #     e_T0o = np.asarray(pytrades_lib.pytrades.et0obs[:, :], dtype=np.float64).reshape((-1))
    # except:
    #     e_T0o = np.asarray([0.], dtype=np.float64)
    # ln_err_const = anc.compute_ln_err_const(ndata, dof, e_RVo, e_T0o, cli.ln_flag)
    ln_err_const = pytrades_lib.pytrades.ln_err_const

    # READ THE NAMES OF THE PARAMETERS FROM THE TRADES_LIB AND CONVERT THEM TO PYTHON STRINGS
    # reshaped_names = pytrades_lib.pytrades.parameter_names.reshape((10, nfit), order='F').T
    # parameter_names = [''.join(reshaped_names[i, :]).strip() for i in range(0, nfit)]

    # INITIALISE SCRIPT FOLDER/LOG FILE
    working_folder, run_log, of_run = init_folder(working_path, cli.sub_folder)

    logger.info('')
    logger.info('==================== ')
    logger.info('pyTRADES-pyMultiNest')
    logger.info('==================== ')
    logger.info('')
    logger.info('WORKING PATH = %s' % (working_path))
    logger.info('dof = ndata(%d) - nfit(%d) = %d' % (ndata, nfit, dof))
# ... (the remainder of the code is omitted here) ...

Author: lucaborsato | Project: trades | Lines: 101 | Source: trades_pymnest.py
Note: The pymultinest.run examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.