This article collects typical usage examples of the Python function scipy.maxentropy.logsumexp. If you have been wondering what exactly logsumexp does, how to call it, or where to find real-world usage, the hand-picked code examples below should help.
Listed below are 20 code examples of the logsumexp function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
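For orientation: logsumexp(a) computes log(sum(exp(a))) in a numerically stable way by shifting the input by its maximum before exponentiating, which is why every example below uses it to normalize log-probabilities. Note that the scipy.maxentropy module was removed from SciPy long ago (around the 0.11 release); in modern SciPy the same function lives in scipy.special, so the minimal sketch below (with made-up sample values) imports it from there:

import numpy as np
from scipy.special import logsumexp  # modern home of the function

log_p = np.array([-1000.0, -1001.0, -1002.0])  # unnormalized log-probabilities
# np.log(np.sum(np.exp(log_p))) underflows to -inf here; logsumexp instead
# computes m + log(sum(exp(log_p - m))) with m = max(log_p).
total = logsumexp(log_p)
posterior = np.exp(log_p - total)  # normalize safely in log space
print(total, posterior.sum())      # posterior sums to 1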
Example 1: calc_avg_rcmks
def calc_avg_rcmks(parser):
    options,args= parser.parse_args()
    njks= 101
    nmks= 101
    jks= numpy.linspace(0.5,0.8,njks)
    mks= numpy.linspace(-0.5,-3.,nmks)
    if options.basti:
        zs= numpy.array([0.004,0.008,0.01,0.0198,0.03,0.04])
        zsolar= 0.019
    elif options.parsec:
        zs= numpy.arange(0.0005,0.06005,0.0005)
        # zs= numpy.array([0.01,0.02])
        zsolar= 0.019
    else:
        zs= numpy.arange(0.0005,0.03005,0.0005)
        # zs= numpy.array([0.01,0.02])
        zsolar= 0.019
    if not os.path.exists(options.outfilename):
        logpz= localzdist(zs,zsolar=zsolar)
        logmkp= numpy.zeros((len(zs),njks,nmks))
        logp= numpy.zeros((len(zs),njks,nmks))
        funcargs= (zs,options,njks,jks,nmks,mks,logpz)
        multOut= multi.parallel_map((lambda x: indiv_calc(x,
                                                          *funcargs)),
                                    range(len(zs)),
                                    numcores=numpy.amin([64,len(zs),
                                                         multiprocessing.cpu_count()]))
        for ii in range(len(zs)):
            logmkp[ii,:,:]= multOut[ii][0,:,:]
            logp[ii,:,:]= multOut[ii][1,:,:]
        save_pickles(options.outfilename,logmkp,logp)
    else:
        savefile= open(options.outfilename,'rb')
        logmkp= pickle.load(savefile)
        logp= pickle.load(savefile)
        savefile.close()
    indx= numpy.isnan(logp)
    logp[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
    logmkp[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
    #Average the peak, so calculate the peak
    for ii in range(len(zs)):
        for jj in range(njks):
            maxmkindx= numpy.argmax(logp[ii,jj,:])
            totlogp= maxentropy.logsumexp(logp[ii,jj,:])
            logmkp[ii,jj,:]= logmkp[ii,jj,maxmkindx]-logp[ii,jj,maxmkindx]+totlogp
            logp[ii,jj,:]= totlogp
    avgmk= numpy.exp(maxentropy.logsumexp(logmkp.flatten())\
                     -maxentropy.logsumexp(logp.flatten()))
    solindx= numpy.argmin(numpy.fabs(zs-0.017))
    avgmksolar= numpy.exp(maxentropy.logsumexp(logmkp[solindx,:,:].flatten())\
                          -maxentropy.logsumexp(logp[solindx,:,:].flatten()))
    print "Average mk: %f" % (-avgmk)
    print "Average mk if solar: %f" % (-avgmksolar)
    return -avgmk
Developer: jobovy | Project: apogee-rc | Lines: 54 | Source: calc_avg_rcmks.py
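A recurring pattern in Example 1 is exp(logsumexp(log_num) - logsumexp(log_den)), which evaluates a ratio of two large sums entirely in log space. A standalone sketch of the pattern with made-up data, using the modern scipy.special import (and assuming positive weights; the example folds the sign of its magnitudes in by hand at the end):

import numpy as np
from scipy.special import logsumexp

# Posterior-weighted mean E[w] = sum(w_i * p_i) / sum(p_i), computed in log
# space so that tiny unnormalized probabilities p_i never underflow.
log_p = np.array([-500.0, -502.0, -499.0])  # unnormalized log-probabilities
w = np.array([2.0, 3.0, 4.0])               # positive weights
avg = np.exp(logsumexp(np.log(w) + log_p) - logsumexp(log_p))
print(avg)  # the p-weighted mean of w, with no underflow anywhere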
Example 2: calc_model
def calc_model(params,options,data,logpiso,logpisodwarf,df,nlocs,locations,iso):
    avg_plate_model= numpy.zeros(nlocs)
    for ii in range(nlocs):
        #Calculate vlos | los
        indx= (data['LOCATION'] == locations[ii])
        thesedata= data[indx]
        thislogpiso= logpiso[indx,:]
        if options.dwarf:
            thislogpisodwarf= logpisodwarf[indx,:]
        else:
            thislogpisodwarf= None
        vlos= numpy.linspace(-200.,200.,options.nvlos)
        pvlos= numpy.zeros(options.nvlos)
        if not options.multi is None:
            pvlos= multi.parallel_map((lambda x: pvlosplate(params,vlos[x],
                                                            thesedata,
                                                            df,options,
                                                            thislogpiso,
                                                            thislogpisodwarf,iso)),
                                      range(options.nvlos),
                                      numcores=numpy.amin([len(vlos),multiprocessing.cpu_count(),options.multi]))
        else:
            for jj in range(options.nvlos):
                print jj
                pvlos[jj]= pvlosplate(params,vlos[jj],thesedata,df,options,
                                      thislogpiso,thislogpisodwarf)
        pvlos-= logsumexp(pvlos)
        pvlos= numpy.exp(pvlos)
        #Calculate mean and velocity dispersion
        avg_plate_model[ii]= numpy.sum(vlos*pvlos)
    return avg_plate_model
Developer: jobovy | Project: apogee-vcirc-2012 | Lines: 31 | Source: plot_internalcomparison.py
Example 3: call_polymorphism
def call_polymorphism(self, obs, post):
    """Get the polymorphism probability.
    This is the posterior probability that the strain is homozygous
    for the non-reference base with the highest count at this position.
    @param obs: one ref base count and three non-ref base counts
    @param post: the posterior hidden state probabilities
    @return: the polymorphism probability
    """
    # unpack the posterior state distribution
    p_recent, p_ancient, p_garbage, p_misaligned = post
    # get the prior probability of polymorphism conditional on state
    p_recent_AA = self.states[0].get_posterior_distribution(obs)[2]
    p_ancient_AA = self.states[1].get_posterior_distribution(obs)[2]
    # compute the posterior probability of a polymorphism
    posterior_polymorphism = 0
    posterior_polymorphism += p_recent * p_recent_AA
    posterior_polymorphism += p_ancient * p_ancient_AA
    # Given that a polymorphism occurred,
    # get the probability distribution over the
    # three non-reference nucleotides.
    r = self.seqerr
    log_Pr = math.log(r/4.0)
    log_PA = math.log(1 - 3*r/4.0)
    logs = [
            obs[1]*log_PA + obs[2]*log_Pr + obs[3]*log_Pr,
            obs[1]*log_Pr + obs[2]*log_PA + obs[3]*log_Pr,
            obs[1]*log_Pr + obs[2]*log_Pr + obs[3]*log_PA]
    condmaxpost = math.exp(max(logs) - logsumexp(logs))
    # get the posterior probability distribution
    maxpost = posterior_polymorphism * condmaxpost
    return maxpost
Developer: argriffing | Project: drosophiline | Lines: 31 | Source: ModelA.py
Example 4: run
def run(*args):
    dprintn(8, "# Generating data")
    global hypotheses
    RANK = str(MPI.COMM_WORLD.Get_rank())
    data_size = args[0]
    p_representation = defaultdict(int)          # how often do you get the right representation
    p_response = defaultdict(int)                # how often do you get the right response?
    p_representation_literal = defaultdict(int)  # how often do you get the right representation
    p_response_literal = defaultdict(int)        # how often do you get the right response?
    p_representation_presup = defaultdict(int)   # how often do you get the right representation
    p_response_presup = defaultdict(int)         # how often do you get the right response?
    dprintn(8, "# Generating data")
    data = generate_data(data_size)
    # recompute these
    dprintn(8, "# Computing posterior")
    #[ x.unclear_functions() for x in hypotheses ]
    [ x.compute_posterior(data) for x in hypotheses ]
    # normalize the posterior in fs
    dprintn(8, "# Computing normalizer")
    Z = logsumexp([x.posterior_score for x in hypotheses])
    # and output the top hypotheses
    qq = FiniteBestSet(max=True, N=25)
    for h in hypotheses: qq.push(h, h.posterior_score)  # get the tops
    for i, h in enumerate(qq.get_sorted()):
        for w in h.all_words():
            fprintn(8, data_size, i, w, h.posterior_score, q(h.lex[w]), f=options.OUT_PATH+"-hypotheses."+RANK+".txt")
    # and compute the probability of being correct
    dprintn(8, "# Computing correct probability")
    for h in hypotheses:
        hstr = str(h)
        #print data_size, len(data), exp(h.posterior_score), correct[ str(h)+":"+w ]
        for w in words:
            p = exp(h.posterior_score - Z)
            key = w + ":" + hstr
            p_representation[w] += p * (agree_pct[key] == 1.)
            p_representation_presup[w] += p * (agree_pct_presup[key] == 1.)  # if we always agree with the target, then we count as the right rep.
            p_representation_literal[w] += p * (agree_pct_literal[key] == 1.)
            # and just how often does the hypothesis agree?
            p_response[w] += p * agree_pct[key]
            p_response_presup[w] += p * agree_pct_presup[key]
            p_response_literal[w] += p * agree_pct_literal[key]
    dprintn(8, "# Outputting")
    for w in words:
        fprintn(10, rank, q(w), data_size, p_representation[w], p_representation_presup[w], p_representation_literal[w], p_response[w], p_response_presup[w], p_response_literal[w], f=options.OUT_PATH+"-stats."+RANK+".txt")
    return 0
Developer: gamahead | Project: LOTlib | Lines: 59 | Source: Evaluate_MPI.py
Example 5: testErrs
def testErrs(options,args):
    ndfehs, ndafes= 201,201
    dfehs= numpy.linspace(0.01,0.4,ndfehs)
    dafes= numpy.linspace(0.01,0.3,ndafes)
    if os.path.exists(args[0]):
        savefile= open(args[0],'rb')
        loglike= pickle.load(savefile)
        ii= pickle.load(savefile)
        jj= pickle.load(savefile)
        savefile.close()
    else:
        loglike= numpy.zeros((ndfehs,ndafes))
        ii, jj= 0, 0
    while ii < ndfehs:
        while jj < ndafes:
            sys.stdout.write('\r'+"Working on %i / %i" % (ii*ndafes+jj+1,ndafes*ndfehs))
            sys.stdout.flush()
            loglike[ii,jj]= errsLogLike(dfehs[ii],dafes[jj],options)
            jj+= 1
        ii+= 1
        jj= 0
        save_pickles(args[0],loglike,ii,jj)
    save_pickles(args[0],loglike,ii,jj)
    sys.stdout.write('\r'+_ERASESTR+'\r')
    sys.stdout.flush()
    if options.prior:
        prior= numpy.zeros((ndfehs,ndafes))
        for ii in range(ndfehs):
            prior[ii,:]= -0.5*(dafes-0.1)**2./0.1**2.-0.5*(dfehs[ii]-0.2)**2./0.1**2.
        loglike+= prior
    loglike-= maxentropy.logsumexp(loglike)
    loglike= numpy.exp(loglike)
    loglike/= numpy.sum(loglike)*(dfehs[1]-dfehs[0])*(dafes[1]-dafes[0])
    #Plot
    bovy_plot.bovy_print()
    bovy_plot.bovy_dens2d(loglike.T,origin='lower',
                          cmap='gist_yarg',
                          xlabel=r'$\delta_{[\mathrm{Fe/H}]}$',
                          ylabel=r'$\delta_{[\alpha/\mathrm{Fe}]}$',
                          xrange=[dfehs[0],dfehs[-1]],
                          yrange=[dafes[0],dafes[-1]],
                          contours=True,
                          cntrmass=True,
                          onedhists=True,
                          levels= special.erf(0.5*numpy.arange(1,4)))
    if options.prior:
        bovy_plot.bovy_text(r'$\mathrm{with\ Gaussian\ prior:}$'+
                            '\n'+r'$\delta_{[\mathrm{Fe/H}]}= 0.2 \pm 0.1$'
                            +'\n'+r'$\delta_{[\alpha/\mathrm{Fe}]}= 0.1 \pm 0.1$',
                            top_right=True)
    bovy_plot.bovy_end_print(options.plotfile)
Developer: jobovy | Project: segue-maps | Lines: 51 | Source: testErrs.py
Example 6: neg_log_likelihood
def neg_log_likelihood(theta_sparse, hb = None):
    if not hb is None:
        h, b = hb
    else:
        h, b = dp(theta_sparse)
    log_kappa = logsumexp(h[0] + b[1])
    nll = log_kappa
    nll -= h[0][0]
    for k in range(1, params['M']):
        nll -= h[k][0,0]
    for ind in theta_sparse:
        nll += params['lambda'] * np.abs(theta_sparse[ind])
    return nll
Developer: othercriteria | Project: neuro_inference | Lines: 15 | Source: inference_stepwise.py
Example 7: pvlosplate
def pvlosplate(params,vhelio,data,df,options,logpiso,logpisodwarf,iso):
    """
    NAME:
       pvlosplate
    PURPOSE:
       calculate the vlos probability for a given location
    INPUT:
       params - parameters of the model
       vhelio - heliocentric los velocity to evaluate
       data - data array for this location
       df - df object(s) (?)
       options - options
       logpiso, logpisodwarf - precalculated isochrones
    OUTPUT:
       log of the probability
    HISTORY:
       2012-02-20 - Written - Bovy (IAS)
    """
    #Output is sum over data l,b,jk,h
    l= data['GLON']*_DEGTORAD
    b= data['GLAT']*_DEGTORAD
    sinl= numpy.sin(l)
    cosl= numpy.cos(l)
    sinb= numpy.sin(b)
    cosb= numpy.cos(b)
    jk= data['J0MAG']-data['K0MAG']
    try:
        jk[(jk < 0.5)]= 0.5  #BOVY: FIX THIS HACK BY EMAILING GAIL
    except TypeError:
        pass  #HACK
    h= data['H0MAG']
    options.multi= 1  #To avoid conflict
    out= -mloglike(params,numpy.zeros(len(data))+vhelio,
                   l,
                   b,
                   jk,
                   h,
                   df,options,
                   sinl,
                   cosl,
                   cosb,
                   sinb,
                   logpiso,
                   logpisodwarf,True,None,iso,data['FEH'])  #None iso for now
    #indx= (out >= -0.1)*(out <= 0.1)
    #print out[indx], jk[indx], h[indx]
    return logsumexp(out)
Developer: jobovy | Project: apogee-vcirc-2012 | Lines: 47 | Source: compareDataModel.py
Example 8: bound
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
    """
    Estimate the variational bound of documents from `corpus`:
    E_q[log p(corpus)] - E_q[log q(corpus)]

    `gamma` are the variational parameters on topic weights for each `corpus`
    document (=2d matrix=what comes out of `inference()`).
    If not supplied, will be inferred from the model.
    """
    score = 0.0
    _lambda = self.state.get_lambda()
    Elogbeta = dirichlet_expectation(_lambda)

    for d, doc in enumerate(corpus):  # stream the input doc-by-doc, in case it's too large to fit in RAM
        if d % self.chunksize == 0:
            logger.debug("bound: at document #%i", d)
        if gamma is None:
            gammad, _ = self.inference([doc])
        else:
            gammad = gamma[d]
        Elogthetad = dirichlet_expectation(gammad)

        # E[log p(doc | theta, beta)]
        score += np.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, int(id)]) for id, cnt in doc)

        # E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
        score += np.sum((self.alpha - gammad) * Elogthetad)
        score += np.sum(gammaln(gammad) - gammaln(self.alpha))
        score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad))

    # Compensate likelihood for when `corpus` above is only a sample of the whole corpus. This ensures
    # that the likelihood is always roughly on the same scale.
    score *= subsample_ratio

    # E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
    score += np.sum((self.eta - _lambda) * Elogbeta)
    score += np.sum(gammaln(_lambda) - gammaln(self.eta))

    if np.ndim(self.eta) == 0:
        sum_eta = self.eta * self.num_terms
    else:
        sum_eta = np.sum(self.eta)

    score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
    return score
Developer: JKamlah | Project: gensim | Lines: 47 | Source: ldamodel.py
Example 9: _parse_hz_dict_indiv
def _parse_hz_dict_indiv(self,hz):
    htype= hz.get('type','exp')
    if htype == 'exp':
        zd= hz.get('h',0.0375)
        th= lambda z, tzd=zd: 1./2./tzd*numpy.exp(-numpy.fabs(z)/tzd)
        tH= lambda z, tzd= zd: (numpy.exp(-numpy.fabs(z)/tzd)-1.
                                +numpy.fabs(z)/tzd)*tzd/2.
        tdH= lambda z, tzd= zd: 0.5*numpy.sign(z)\
            *(1.-numpy.exp(-numpy.fabs(z)/tzd))
    elif htype == 'sech2':
        zd= hz.get('h',0.0375)
        th= lambda z, tzd=zd: 1./numpy.cosh(z/2./tzd)**2./4./tzd
        # Avoid overflow in cosh
        tH= lambda z, tzd= zd: \
            tzd*(logsumexp(numpy.array([z/2./tzd,-z/2./tzd]),axis=0)\
                 -numpy.log(2.))
        tdH= lambda z, tzd= zd: numpy.tanh(z/2./tzd)/2.
    return (th,tH,tdH)
Developer: jobovy | Project: galpy | Lines: 18 | Source: DiskSCFPotential.py
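The sech² branch in Example 9 needs log cosh(x) and computes it without overflow through the identity log cosh(x) = logsumexp([x, -x]) - log 2, which follows from cosh(x) = (e^x + e^-x)/2. A quick check of the identity, with made-up values and the modern scipy.special import:

import numpy as np
from scipy.special import logsumexp

x = np.array([0.1, 10.0, 800.0])  # numpy.cosh(800.) overflows to inf
# overflow-safe log(cosh(x)); reduce over the stacked [x, -x] pair
safe_logcosh = logsumexp(np.array([x, -x]), axis=0) - np.log(2.)
print(safe_logcosh)               # finite for all three entries
print(np.log(np.cosh(x[:2])))     # agrees with the first two entries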
Example 10: _eval_sumgaussians
def _eval_sumgaussians(x,xamp,xmean,xcovar):
    """x array [ndata,ndim], return log"""
    ndata= x.shape[0]
    da= x.shape[1]
    out= numpy.zeros(ndata)
    ngauss= len(xamp)
    loglike= numpy.zeros(ngauss)
    for ii in range(ndata):
        for kk in range(ngauss):
            if xamp[kk] == 0.:
                loglike[kk]= numpy.finfo(numpy.dtype(numpy.float64)).min
                continue
            tinv= linalg.inv(xcovar[kk,:,:])
            delta= x[ii,:]-xmean[kk,:]
            loglike[kk]= numpy.log(xamp[kk])+0.5*numpy.log(linalg.det(tinv))\
                -0.5*numpy.dot(delta,numpy.dot(tinv,delta))+\
                da*_SQRTTWOPI
        out[ii]= maxentropy.logsumexp(loglike)
    return out
Developer: jobovy | Project: qso-var | Lines: 19 | Source: classQSOXD.py
Example 11: bound
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
    score = 0.0
    _lambda = self.state.get_lambda()
    Elogbeta = dirichlet_expectation(_lambda)
    for d, doc in enumerate(corpus):
        if gamma is None:
            gammad, _ = self.inference([doc])
        else:
            gammad = gamma[d]
        Elogthetad = dirichlet_expectation(gammad)
        score += numpy.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, id]) for id, cnt in doc)
        score += numpy.sum((self.alpha - gammad) * Elogthetad)
        score += numpy.sum(gammaln(gammad) - gammaln(self.alpha))
        score += gammaln(numpy.sum(self.alpha)) - gammaln(numpy.sum(gammad))
    score *= subsample_ratio
    score += numpy.sum((self.eta - _lambda) * Elogbeta)
    score += numpy.sum(gammaln(_lambda) - gammaln(self.eta))
    score += numpy.sum(gammaln(self.eta * self.num_terms) - gammaln(numpy.sum(_lambda, 1)))
    return score
Developer: dalinhuang | Project: Smart-City | Lines: 19 | Source: ldamodel.py
Example 12: inference
def inference(self, doc):
    """
    Perform inference on a single document.

    Return 3-tuple of `(likelihood of this document, word-topic distribution
    phi, expected word counts gamma (~topic distribution))`.

    A document is simply a bag-of-words collection which supports len() and
    iteration over (wordIndex, wordCount) 2-tuples.

    The model itself is not affected in any way (this function is read-only aka
    const).
    """
    # init help structures
    totalWords = sum(wordCount for _, wordCount in doc)
    gamma = numpy.zeros(self.numTopics) + self.alpha + 1.0 * totalWords / self.numTopics
    phi = numpy.zeros(shape = (len(doc), self.numTopics)) + 1.0 / self.numTopics
    likelihood = likelihoodOld = converged = numpy.NAN
    # variational estimate
    for i in xrange(self.VAR_MAX_ITER):
        # logger.debug("inference step #%s, converged=%s, likelihood=%s, likelihoodOld=%s" %
        #              (i, converged, likelihood, likelihoodOld))
        if numpy.isfinite(converged) and converged <= self.VAR_CONVERGED:
            logger.debug("document converged in %i iterations" % i)
            break
        for n, (wordIndex, wordCount) in enumerate(doc):
            # compute phi vars, in log space, to prevent numerical nastiness
            tmp = digamma(gamma) + self.logProbW[:, wordIndex]  # vector operation
            # convert phi and update gamma
            newPhi = numpy.exp(tmp - logsumexp(tmp))
            gamma += wordCount * (newPhi - phi[n])
            phi[n] = newPhi
        likelihood = self.computeLikelihood(doc, phi, gamma)
        assert numpy.isfinite(likelihood)
        converged = numpy.divide(likelihoodOld - likelihood, likelihoodOld)
        likelihoodOld = likelihood
    return likelihood, phi, gamma
Developer: beibeiyang | Project: Latent-Dirichlet-Allocation | Lines: 42 | Source: ldamodel.py
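The inner update in Example 12, newPhi = numpy.exp(tmp - logsumexp(tmp)), is the standard numerically stable softmax: subtracting the log-normalizer before exponentiating guarantees the result sums to one and cannot overflow. A standalone sketch with illustrative values, again using the modern scipy.special import:

import numpy as np
from scipy.special import logsumexp

a = np.array([1000.0, 1001.0, 1002.0])  # naive np.exp(a) overflows to inf
softmax = np.exp(a - logsumexp(a))      # == exp(a) / sum(exp(a)), stably
print(softmax, softmax.sum())           # a proper distribution summing to 1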
Example 13: bound
def bound(self, corpus, gamma=None):
    """
    Estimate the variational bound of documents from `corpus`.

    `gamma` are the variational parameters on topic weights (one for each
    document in `corpus`). If not supplied, will be automatically inferred
    from the model.
    """
    score = 0.0
    Elogbeta = numpy.log(self.expElogbeta)
    for d, doc in enumerate(corpus):
        if d % self.chunks == 0:
            logger.info("PROGRESS: at document #%i" % d)
        if gamma is None:
            gammad, _ = self.inference([doc])
        else:
            gammad = gamma[d, :]
        Elogthetad = dirichlet_expectation(gammad)
        expElogthetad = numpy.exp(Elogthetad)
        ids = [id for id, _ in doc]
        cts = numpy.array([cnt for _, cnt in doc])
        phinorm = numpy.zeros(len(ids))
        for i in xrange(len(ids)):
            phinorm[i] = logsumexp(Elogthetad + Elogbeta[:, ids[i]])
        # E[log p(docs | theta, beta)]
        score += numpy.sum(cts * phinorm)
        # E[log p(theta | alpha) - log q(theta | gamma)]
        score += numpy.sum((self.alpha - gammad) * Elogthetad)
        score += numpy.sum(gammaln(gammad) - gammaln(self.alpha))
        score += gammaln(self.alpha * self.numTopics) - gammaln(numpy.sum(gammad))
    # E[log p(beta | eta) - log q (beta | lambda)]
    score += numpy.sum((self.eta - self._lambda) * Elogbeta)
    score += numpy.sum(gammaln(self._lambda) - gammaln(self.eta))
    score += numpy.sum(gammaln(self.eta * self.numTerms) -
                       gammaln(numpy.sum(self._lambda, 1)))
    return score
Developer: hyperink | Project: ideagen | Lines: 41 | Source: ldamodel.py
Example 14: dp
def dp(theta_sparse):
    theta = theta_dense(theta_sparse)
    h = [None] * params['M']
    h[0] = np.empty(n_w[0])
    for w in range(n_w[0]):
        h[0][w] = np.sum(theta * hits_pre[0][w])
    for k in range(1, params['M']):
        h[k] = np.empty((n_w[k-1], n_w[k]))
        for w_prev in range(n_w[k-1]):
            for w in range(n_w[k]):
                h[k][w_prev,w] = np.sum(theta * hits_pre[k][w_prev,w])
    b = [None] * (params['M']+1)
    b[params['M']] = np.zeros(n_w[params['M']-1])
    for k in range(params['M']-1, 0, -1):
        b[k] = np.empty(n_w[k-1])
        for w_prev in range(n_w[k-1]):
            b[k][w_prev] = logsumexp(h[k][w_prev] + b[k+1])
    return h, b
Developer: othercriteria | Project: neuro_inference | Lines: 21 | Source: inference_stepwise.py
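The backward recursion in Example 14, b[k][w_prev] = logsumexp(h[k][w_prev] + b[k+1]), is a log-domain matrix-vector product: each new backward message is a logsumexp over a row of log weights plus the incoming message. The same one-liner, vectorized over rows, with a made-up transition matrix and messages:

import numpy as np
from scipy.special import logsumexp

logM = np.log(np.array([[0.9, 0.1],    # log transition weights
                        [0.2, 0.8]]))
b_next = np.array([-1200.0, -1190.0])  # incoming log backward messages
# log-domain equivalent of M @ exp(b_next), safe against underflow
b = logsumexp(logM + b_next[None, :], axis=1)
print(b)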
Example 15: _eval_gauss_grid
def _eval_gauss_grid(x,y,xamp,xmean,xcovar):
    nx= len(x)
    ny= len(y)
    out= numpy.zeros((nx,ny))
    ngauss= len(xamp)
    dim= xmean.shape[1]
    loglike= numpy.zeros(ngauss)
    for ii in range(nx):
        for jj in range(ny):
            a= numpy.array([x[ii],y[jj]])
            for kk in range(ngauss):
                if xamp[kk] == 0.:
                    loglike[kk]= numpy.finfo(numpy.dtype(numpy.float64)).min
                    continue
                tinv= numpy.linalg.inv(xcovar[kk,:,:])
                delta= a-xmean[kk,:]
                loglike[kk]= numpy.log(xamp[kk])+0.5*numpy.log(numpy.linalg.det(tinv))\
                    -0.5*numpy.dot(delta,numpy.dot(tinv,delta))+\
                    dim*_SQRTTWOPI
            out[ii,jj]= logsumexp(loglike)
    return out
Developer: jobovy | Project: segue-maps | Lines: 21 | Source: plotXDPotPDFs.py
Example 16: run
def run(*args):
    dprintn(8, "# Generating data")
    global hypotheses
    data_size = args[0]
    here_correct = dict()  # how often is each word right?
    for w in words: here_correct[w] = 0.0
    dprintn(8, "# Generating data")
    data = generate_data(data_size)
    # recompute these
    dprintn(8, "# Computing posterior")
    [ x.compute_posterior(data) for x in hypotheses ]
    # normalize the posterior in fs
    dprintn(8, "# Computing normalizer")
    Z = logsumexp([x.lp for x in hypotheses])
    # and compute the probability of being correct
    dprintn(8, "# Computing correct probability")
    for h in hypotheses:
        #print data_size, len(data), exp(h.lp), correct[ str(h)+":"+w ]
        for w in words:
            # the posterior times the prob of agreement with the right one, weighted by number of iterations
            here_correct[w] += exp(h.lp-Z) * correct[ str(h)+":"+w ]
    dprintn(8, "# Outputting")
    o = open(OUT_PATH+str(rank), 'a')
    for w in words:
        print >>o, rank, data_size, here_correct[w], q(w)
    o.close()
    return 0
Developer: gamahead | Project: LOTlib | Lines: 36 | Source: Evaluate_MPI_2011Jan15.py
Example 17: plot_distanceprior
#......... (part of the code is omitted here by the source page) .........
        elif options.varfeh:
            #Find correct iso
            indx= (locl == data[ii]['LOCATION'])
            logpiso[ii,:]= iso[0][indx](numpy.zeros(_BINTEGRATENBINS)+jk[ii],mh)
        else:
            logpiso[ii,:]= iso(numpy.zeros(_BINTEGRATENBINS)+jk[ii],mh)
    for jj in range(_BINTEGRATENBINS):
        d= ds[jj]/_REFR0
        R= numpy.sqrt(1.+d**2.-2.*d*cosl)
        indx= (R == 0.)
        R[indx]+= 0.0001
        theta= numpy.arcsin(d/R*sinl)
        indx= (1./cosl < d)*(cosl > 0.)
        theta[indx]= numpy.pi-theta[indx]
        indx= (theta < 0.)
        theta[indx]+= 2.*math.pi
        thisout= _logpd([0.,1.],d,None,None,
                        None,None,None,
                        options,R,theta,
                        1.,0.,logpiso[:,jj])
        #Find bin to which these contribute
        thetabin= numpy.floor((theta-xgrid[0])/(xgrid[1]-xgrid[0])+0.5)
        Rbin= numpy.floor((R-plotygrid[0])/(plotygrid[1]-plotygrid[0]))
        indx= (thetabin < 0)
        thetabin[indx]= 0
        Rbin[indx]= 0
        thisout[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
        indx= (thetabin >= 2*res)
        thetabin[indx]= 0.  #Has to be
        #Rbin[indx]= 0
        #thisout[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
        indx= (Rbin < 0)
        thetabin[indx]= 0
        Rbin[indx]= 0
        thisout[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
        indx= (Rbin >= res)
        thetabin[indx]= 0
        Rbin[indx]= 0
        thisout[indx]= -numpy.finfo(numpy.dtype(numpy.float64)).max
        thetabin= thetabin.astype('int')
        Rbin= Rbin.astype('int')
        for ii in range(len(data)):
            plotthis[thetabin,Rbin,ii]= thisout[ii]
    #Normalize
    for ii in range(2*res):
        for jj in range(res):
            plotthis[ii,jj,0]= logsumexp(plotthis[ii,jj,:])
    plotthis= plotthis[:,:,0]
    plotthis-= numpy.amax(plotthis)
    plotthis= numpy.exp(plotthis)
    plotthis[(plotthis == 0.)]= numpy.nan
    #Get los
    locations= list(set(data['LOCATION']))
    nlocs= len(locations)
    l_plate= numpy.zeros(nlocs)
    for ii in range(nlocs):
        indx= (data['LOCATION'] == locations[ii])
        l_plate[ii]= numpy.mean(data['GLON'][indx])
    bovy_plot.bovy_print()
    ax= pyplot.subplot(111,projection='galpolar')  #galpolar is in bovy_plot
    vmin, vmax= 0., 1.
    out= ax.pcolor(plotxgrid,plotygrid,plotthis.T,cmap='gist_yarg',
                   vmin=vmin,vmax=vmax,zorder=2)
    #Overlay los
    for ii in range(nlocs):
        lds= numpy.linspace(0.,2.95,501)
        lt= numpy.zeros(len(lds))
        lr= numpy.zeros(len(lds))
        lr= numpy.sqrt(1.+lds**2.-2.*lds*numpy.cos(l_plate[ii]*_DEGTORAD))
        lt= numpy.arcsin(lds/lr*numpy.sin(l_plate[ii]*_DEGTORAD))
        indx= (1./numpy.cos(l_plate[ii]*_DEGTORAD) < lds)*(numpy.cos(l_plate[ii]*_DEGTORAD) > 0.)
        lt[indx]= numpy.pi-lt[indx]
        ax.plot(lt,lr,
                ls='--',color='w',zorder=3)
    from matplotlib.patches import Arrow, FancyArrowPatch
    arr= FancyArrowPatch(posA=(-math.pi/2.,1.8),
                         posB=(-math.pi/4.,1.8),
                         arrowstyle='->',
                         connectionstyle='arc3,rad=%4.2f' % (-math.pi/16.),
                         shrinkA=2.0, shrinkB=2.0, mutation_scale=20.0,
                         mutation_aspect=None,fc='k')
    ax.add_patch(arr)
    bovy_plot.bovy_text(-math.pi/2.,1.97,r'$\mathrm{Galactic\ rotation}$',
                        rotation=-22.5)
    radii= numpy.array([0.5,1.,1.5,2.,2.5])
    labels= []
    for r in radii:
        ax.plot(numpy.linspace(0.,2.*math.pi,501),
                numpy.zeros(501)+r,ls='-',color='0.65',zorder=1,lw=0.5)
        labels.append(r'$%i$' % int(r*8.))
    pyplot.rgrids(radii,labels=labels,angle=-32.5)
    bovy_plot.bovy_text(5.785,2.82,r'$\mathrm{kpc}$')
    azs= numpy.array([0.,45.,90.,135.,180.,225.,270.,315.])*_DEGTORAD
    for az in azs:
        ax.plot(numpy.zeros(501)+az,
                numpy.linspace(0.,2.8,501),'-',color='0.6',lw=0.5,zorder=1)
    #Sun
    bovy_plot.bovy_text(0.065,.9075,r'$\odot$')
    pyplot.ylim(0.,2.8)
    bovy_plot.bovy_end_print(options.plotfile)
Developer: jobovy | Project: apogee-vcirc-2012 | Lines: 101 | Source: plot_distanceprior.py
Example 18: plot_bestfit
#......... (part of the code is omitted here by the source page) .........
        siga_plate[ii] = numpy.std(data["VHELIO"][indx]) / numpy.sqrt(numpy.sum(indx))
        sigerr_plate[ii] = bootstrap_sigerr(data["VHELIO"][indx])
    # Calculate plate means and variances from the model
    # Load initial parameters from file
    savefile = open(args[0], "rb")
    params = pickle.load(savefile)
    if not options.index is None:
        params = params[options.index]
    savefile.close()
    # params[0]= 245./235.
    # params[1]= 8.5/8.
    avg_plate_model = numpy.zeros(nlocs)
    sig_plate_model = numpy.zeros(nlocs)
    for ii in range(nlocs):
        # Calculate vlos | los
        indx = data["LOCATION"] == locations[ii]
        thesedata = data[indx]
        thislogpiso = logpiso[indx, :]
        if options.dwarf:
            thislogpisodwarf = logpisodwarf[indx, :]
        else:
            thislogpisodwarf = None
        vlos = numpy.linspace(-200.0, 200.0, options.nvlos)
        pvlos = numpy.zeros(options.nvlos)
        if not options.multi is None:
            pvlos = multi.parallel_map(
                (lambda x: pvlosplate(params, vlos[x], thesedata, df, options, thislogpiso, thislogpisodwarf, iso)),
                range(options.nvlos),
                numcores=numpy.amin([len(vlos), multiprocessing.cpu_count(), options.multi]),
            )
        else:
            for jj in range(options.nvlos):
                print jj
                pvlos[jj] = pvlosplate(params, vlos[jj], thesedata, df, options, thislogpiso, thislogpisodwarf, iso)
        pvlos -= logsumexp(pvlos)
        pvlos = numpy.exp(pvlos)
        # Calculate mean and velocity dispersion
        avg_plate_model[ii] = numpy.sum(vlos * pvlos)
        sig_plate_model[ii] = numpy.sqrt(numpy.sum(vlos ** 2.0 * pvlos) - avg_plate_model[ii] ** 2.0)
    # Plot everything
    left, bottom, width, height = 0.1, 0.4, 0.8, 0.5
    axTop = pyplot.axes([left, bottom, width, height])
    left, bottom, width, height = 0.1, 0.1, 0.8, 0.3
    axMean = pyplot.axes([left, bottom, width, height])
    # left, bottom, width, height= 0.1, 0.1, 0.8, 0.2
    # axSig= pyplot.axes([left,bottom,width,height])
    fig = pyplot.gcf()
    fig.sca(axTop)
    pyplot.ylabel(r"$\mathrm{Heliocentric\ velocity}\ [\mathrm{km\ s}^{-1}]$")
    pyplot.xlabel(r"$\mathrm{Galactic\ longitude}\ [\mathrm{deg}]$")
    pyplot.xlim(0.0, 360.0)
    pyplot.ylim(-200.0, 200.0)
    nullfmt = NullFormatter()  # no labels
    axTop.xaxis.set_major_formatter(nullfmt)
    bovy_plot.bovy_plot(data["GLON"], data["VHELIO"], "k,", yrange=[-200.0, 200.0], xrange=[0.0, 360.0], overplot=True)
    ndata_t = int(math.floor(len(data) / 1000.0))
    ndata_h = len(data) - ndata_t * 1000
    bovy_plot.bovy_plot(l_plate, avg_plate, "o", overplot=True, mfc="0.5", mec="none")
    bovy_plot.bovy_plot(l_plate, avg_plate_model, "x", overplot=True, ms=10.0, mew=1.5, color="0.7")
    # Legend
    bovy_plot.bovy_plot([260.0], [150.0], "k,", overplot=True)
    bovy_plot.bovy_plot([260.0], [120.0], "o", mfc="0.5", mec="none", overplot=True)
    bovy_plot.bovy_plot([260.0], [90.0], "x", ms=10.0, mew=1.5, color="0.7", overplot=True)
    bovy_plot.bovy_text(270.0, 145.0, r"$\mathrm{data}$")
    bovy_plot.bovy_text(270.0, 115.0, r"$\mathrm{data\ mean}$")
    bovy_plot.bovy_text(270.0, 85.0, r"$\mathrm{model\ mean}$")
    bovy_plot._add_ticks()
    # Now plot the difference
    fig.sca(axMean)
    bovy_plot.bovy_plot([0.0, 360.0], [0.0, 0.0], "-", color="0.5", overplot=True)
    bovy_plot.bovy_plot(l_plate, avg_plate - avg_plate_model, "ko", overplot=True)
    pyplot.errorbar(
        l_plate, avg_plate - avg_plate_model, yerr=siga_plate, marker="o", color="k", linestyle="none", elinestyle="-"
    )
    pyplot.ylabel(r"$\bar{V}_{\mathrm{data}}-\bar{V}_{\mathrm{model}}$")
    pyplot.ylim(-14.5, 14.5)
    pyplot.xlim(0.0, 360.0)
    bovy_plot._add_ticks()
    # axMean.xaxis.set_major_formatter(nullfmt)
    pyplot.xlabel(r"$\mathrm{Galactic\ longitude}\ [\mathrm{deg}]$")
    pyplot.xlim(0.0, 360.0)
    bovy_plot._add_ticks()
    # Save
    bovy_plot.bovy_end_print(options.plotfilename)
    return None
    # Sigma (unreachable after the return above; kept as in the original source)
    fig.sca(axSig)
    pyplot.plot([0.0, 360.0], [1.0, 1.0], "-", color="0.5")
    bovy_plot.bovy_plot(l_plate, sig_plate / sig_plate_model, "ko", overplot=True)
    pyplot.errorbar(
        l_plate,
        sig_plate / sig_plate_model,
        yerr=sigerr_plate / sig_plate_model,
        marker="o",
        color="k",
        linestyle="none",
        elinestyle="-",
    )
    pyplot.ylabel(r"$\sigma_{\mathrm{los}}^{\mathrm{data}}/ \sigma_{\mathrm{los}}^{\mathrm{model}}$")
    pyplot.ylim(0.5, 1.5)
Developer: jobovy | Project: apogee-vcirc-2012 | Lines: 101 | Source: plot_bestfit.py
Example 19: map_vc_like_simple
def map_vc_like_simple(parser):
    """
    NAME:
       map_vc_like_simple
    PURPOSE:
       map the vc likelihood assuming knowledge of the DF
    INPUT:
       parser - from optparse
    OUTPUT:
       stuff as specified by the options
    HISTORY:
       2011-04-20 - Written - Bovy (NYU)
    """
    (options,args)= parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        sys.exit(-1)
    #Set up DF
    dfc= dehnendf(beta=0.,profileParams=(options.rd,options.rs,options.so),
                  correct=True,niter=20)
    #Load data
    picklefile= open(args[0],'rb')
    out= pickle.load(picklefile)
    picklefile.close()
    ndata= len(out)
    if options.linearfit:
        plot_linear(out,options.los*_DEGTORAD,options,dfc)
        return None
    #Map likelihood
    vcirc= nu.linspace(options.vmin,options.vmax,options.nvcirc)
    if not options.nbeta is None:
        betas= nu.linspace(options.betamin,options.betamax,options.nbeta)
        like= nu.zeros((options.nvcirc,options.nbeta))
        for ii in range(options.nvcirc):
            for kk in range(options.nbeta):
                thislike= 0.
                for jj in range(ndata):
                    thislike+= single_vlos_loglike(vcirc[ii],out[jj],dfc,
                                                   options,
                                                   options.los*_DEGTORAD,
                                                   beta=betas[kk])
                like[ii,kk]= thislike
        like-= logsumexp(like.flatten())+m.log(vcirc[1]-vcirc[0])
        bovy_plot.bovy_print()
        bovy_plot.bovy_dens2d(nu.exp(like).T,
                              origin='lower',
                              xrange=[options.vmin,options.vmax],
                              yrange=[options.betamin,options.betamax],
                              aspect=(options.vmax-options.vmin)/\
                                  (options.betamax-options.betamin),
                              cmap='gist_yarg',
                              xlabel=r'$v_c / v_0$',
                              ylabel=r'$\beta$',
                              contours=True,cntrmass=True,
                              levels=[0.682,0.954,0.997])
        bovy_plot.bovy_text(r'$\sigma_R(R_0) = %4.2f \ v_0$' % options.so\
                            +'\n'+\
                            r'$l = %i^\circ$' % round(options.los),
                            top_left=True)
        bovy_plot.bovy_end_print(options.plotfilename)
    else:
        like= nu.zeros(options.nvcirc)
        for ii in range(options.nvcirc):
            thislike= 0.
            for jj in range(ndata):
                thislike+= single_vlos_loglike(vcirc[ii],out[jj],dfc,options,
                                               options.los*_DEGTORAD)
            like[ii]= thislike
        like-= logsumexp(like)+m.log(vcirc[1]-vcirc[0])
        #Calculate mean and sigma
        vcmean= nu.sum(vcirc*nu.exp(like)*(vcirc[1]-vcirc[0]))
        vc2mean= nu.sum(vcirc**2.*nu.exp(like)*(vcirc[1]-vcirc[0]))
        #Plot
        bovy_plot.bovy_print()
        bovy_plot.bovy_plot(vcirc,nu.exp(like),'k-',xlabel=r'$v_c / v_0$',
                            ylabel=r'$p(\mathrm{data} | v_c)$')
        bovy_plot.bovy_text(r'$\langle v_c \rangle = %4.2f \ v_0$' % vcmean +'\n'+
                            r'$\sqrt{\langle v_c^2 \rangle - \langle v_c \rangle^2} = %4.2f \ v_0$' % (m.sqrt(vc2mean-vcmean**2.)) +'\n'+\
                            r'$\sigma_R(R_0) = %4.2f \ v_0$' % options.so+'\n'+\
                            r'$l = %i^\circ$' % round(options.los),
                            top_left=True)
        bovy_plot.bovy_end_print(options.plotfilename)
Developer: jobovy | Project: apogee-vcirc-2012 | Lines: 82 | Source: map_vc_like_simple.py
Example 20: createFakeData
#......... (part of the code is omitted here by the source page) .........
            logpiso[ii,:]= iso[0][indx](numpy.zeros(_BINTEGRATENBINS)+(data['J0MAG']-data['K0MAG'])[ii],mh)
        else:
            logpiso[ii,:]= iso[0](numpy.zeros(_BINTEGRATENBINS)
                                  +(data['J0MAG']-data['K0MAG'])[ii],mh)
    if options.dwarf:
        logpisodwarf= numpy.zeros((len(data),_BINTEGRATENBINS))
        dwarfds= numpy.linspace(_BINTEGRATEDMIN_DWARF,_BINTEGRATEDMAX_DWARF,
                                _BINTEGRATENBINS)
        dm= _dm(dwarfds)
        for ii in range(len(data)):
            mh= data['H0MAG'][ii]-dm
            logpisodwarf[ii,:]= iso[1](numpy.zeros(_BINTEGRATENBINS)
# (this example is truncated here by the source page)