This article collects typical usage examples of Python's numpy.histogramdd function. If you are wondering what histogramdd does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
Twenty code examples of histogramdd are shown below, listed roughly by popularity. You can upvote the ones you like or find useful; your feedback helps the system recommend better Python examples.
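Before diving into the collected examples, here is a minimal, self-contained sketch of the basic call: histogramdd takes an (N, D) array of samples (or a sequence of D coordinate arrays), per-dimension bin counts or edges, and optional range and weights arguments, and returns the D-dimensional count array together with the list of bin edges. The data below are synthetic and purely illustrative.

import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(size=(1000, 3))          # 1000 samples in 3-D

hist, edges = np.histogramdd(samples, bins=(4, 5, 6))
print(hist.shape)                             # (4, 5, 6): counts per cell
print([e.size for e in edges])                # [5, 6, 7]: edges per dimension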
Example 1: bin_sum
def bin_sum(r, f, bins=10):
    """Binned sum of the function f(r).

    Parameters:
        r:    independent variable to be binned over
        f:    function values to be summed (or a tuple of such arrays)
        bins: number of bins, or bin edges of length `nbins + 1` (default 10)

    Returns:
        total: the summed value per bin
        count: number of values summed per bin (the histogram)
        bins:  bin edges
    """
    multi = isinstance(f, tuple)
    if bins == 1:
        # bins=1 means "one bin per integer value of r", so use bincount.
        if r.dtype.kind not in 'iu':
            assert np.allclose(r, np.around(r)), 'need integer array for bins=1'
            print('converting to int array')
            r = r.astype(int)
        count = np.bincount(r)
        if multi:
            total = [np.bincount(r, weights=fi) for fi in f]
        else:
            total = np.bincount(r, weights=f)
        bins = np.arange(len(count) + 1)
    else:
        count, bins = np.histogramdd(r, bins)
        if multi:
            total = [np.histogramdd(r, bins, weights=fi)[0] for fi in f]
        else:
            total = np.histogramdd(r, bins, weights=f)[0]
        if len(bins) == 1:
            # histogramdd returns a list of edge arrays; unwrap the 1-D case.
            bins = bins[0]
    return total, count.astype(int), bins
Author: leewalsh | Project: square-tracking | Lines: 34 | Source: correlation.py
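A possible way to call the function above on synthetic data (the arrays and bin count here are illustrative, not from the original project); dividing the binned sum by the bin counts gives a binned average of f(r).

import numpy as np

rng = np.random.default_rng(1)
r = rng.uniform(0, 5, size=1000)              # radii
f = r**2                                      # values to sum per radius bin

total, count, bins = bin_sum(r, f, bins=10)
mean_per_bin = total / np.where(count > 0, count, 1)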
Example 2: myhist
def myhist(x, weights=None, **histkw):
    """
    Multidimensional histogram with support for multidimensional weights.
    """
    # Parse input
    x = mmlpars.mml_pars(x, type=np.ndarray)
    xshp = x.shape
    N = xshp[0]
    # Histogram without weights
    if weights is None:
        hist, bins = np.histogramdd(x, **histkw)
    # Histogram with weights
    else:
        wshp = weights.shape
        # Histogram with scalar (one per sample) weights
        if wshp == (N,):
            hist, bins = np.histogramdd(x, weights=weights, **histkw)
        # Histogram with vector weights: one histogram per weight column
        else:
            # Handle errors
            if wshp[0] != N:
                raise Exception('Weights must have the same size first dimension as the data. '
                                '(data.shape={}, weights.shape={})'.format(xshp, wshp))
            if len(wshp) > 2:
                raise Exception('Weights with more than 2 dimensions are not supported. '
                                '(weights.shape={})'.format(wshp))
            # Get histograms, stacking one per weight column along a new last axis
            hist = None
            for iw in range(wshp[1]):
                ihist, bins = np.histogramdd(x, weights=weights[:, iw], **histkw)
                if iw == 0:
                    # Reuse the bin edges of the first pass for all remaining columns
                    histkw['bins'] = bins
                    hist = np.zeros(ihist.shape + (wshp[1],))
                hist[..., iw] = ihist
    # Return output
    return hist, bins
Author: cfh5058 | Project: mmlpy | Lines: 32 | Source: mmlmath.py
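The per-column weighting pattern used above can also be written without the mmlpars dependency; the sketch below (with arbitrary array sizes) runs one histogramdd call per weight column and reuses the bin edges of the first call so all columns are binned identically.

import numpy as np

rng = np.random.default_rng(2)
x = rng.random((500, 2))                      # 500 samples in 2-D
weights = rng.random((500, 3))                # 3 weight columns per sample

hist0, edges = np.histogramdd(x, bins=8, weights=weights[:, 0])
hist = np.zeros(hist0.shape + (weights.shape[1],))
hist[..., 0] = hist0
for iw in range(1, weights.shape[1]):
    hist[..., iw], _ = np.histogramdd(x, bins=edges, weights=weights[:, iw])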
Example 3: grid_21cm
def grid_21cm(self, data=None):
    # Remove the mean of each frequency slice to get the temperature fluctuation.
    deltaT = np.empty_like(data)
    for i in np.arange(data.shape[0]):
        deltaT[i] = data[i] - data[i].mean()
    # Grid the particle positions (self.DPM): point counts per cell, and the
    # temperature-weighted sum per cell.
    nn = np.histogramdd(self.DPM, bins=(self.bin_x, self.bin_y, self.bin_z))[0]
    T, edges = np.histogramdd(self.DPM, bins=(self.bin_x, self.bin_y, self.bin_z),
                              weights=deltaT.reshape(-1))
    return T
Author: POFK | Project: ICA_learning | Lines: 7 | Source: getGridPk.py
Example 4: bin_by_mean
def bin_by_mean(lon, lat, z, bins=10, range=None):
    # histogramdd is called with (lat, lon) ordering below, so flip the
    # per-axis bins and ranges that were passed in (lon, lat) order.
    bins = bins[::-1]
    range = range[::-1]
    # Sum of z per cell divided by the number of points per cell = mean per cell.
    w_sum, _ = np.histogramdd((lat, lon), weights=z, bins=bins, range=range)
    n_pts, edges = np.histogramdd((lat, lon), bins=bins, range=range)
    n_pts[n_pts == 0] = np.nan    # empty cells become NaN instead of dividing by zero
    return (w_sum / n_pts), n_pts, edges[1], edges[0]
Author: fspaolo | Project: code | Lines: 7 | Source: bindata2.py
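The binned-mean idiom in isolation, on synthetic coordinates: a weighted histogram gives the per-cell sum of z, an unweighted histogram gives the per-cell count, and their ratio is the per-cell mean (NaN where a cell is empty). The grid sizes here are arbitrary.

import numpy as np

rng = np.random.default_rng(3)
lon = rng.uniform(-180, 180, 2000)
lat = rng.uniform(-90, 90, 2000)
z = np.sin(np.radians(lat))

z_sum, edges = np.histogramdd((lat, lon), bins=(18, 36), weights=z)
n_pts, _ = np.histogramdd((lat, lon), bins=(18, 36))
with np.errstate(invalid='ignore'):
    z_mean = z_sum / n_pts                    # NaN in empty cells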
Example 5: test3
def test3(self):
    print("Testing the user interface, random base_distribution_type")
    number_of_particles = 10000
    test_grid = self.setup_simple_grid()
    sph_particles = convert_grid_to_SPH(test_grid, number_of_particles,
                                        base_distribution_type="random", seed=12345)
    self.assertEqual(len(sph_particles), number_of_particles)
    self.assertAlmostEqual(sph_particles.mass.sum(), 1.5 | units.kg)
    self.assertAlmostEqual(sph_particles.velocity, [3.0, 4.0, 0.0] | units.m / units.s)
    self.assertAlmostEqual(sph_particles.u, 1.0 | (units.m / units.s)**2)
    # For 'random', the number of particles in a cell should scale only on average
    # with the amount of mass in the cell:
    self.assertAlmostRelativeEqual(
        ((1.5 | units.kg) / number_of_particles * numpy.histogramdd(
            sph_particles.position.value_in(units.m), bins=(4, 3, 2))[0]).sum(),
        (test_grid.rho * test_grid.cellsize().prod()).sum(),
        places=2
    )
    self.assertRaises(AssertionError,
        self.assertAlmostRelativeEqual,
        (1.5 | units.kg) / number_of_particles * numpy.histogramdd(
            sph_particles.position.value_in(units.m), bins=(4, 3, 2))[0],
        test_grid.rho * test_grid.cellsize().prod(),
        places=2,
    )
    self.assertAlmostEqual(sph_particles.h_smooth, (50.0 / number_of_particles)**(1.0 / 3) | units.m)
Author: Ingwar | Project: amuse | Lines: 25 | Source: test_grid_to_sph.py
Example 6: __init__
def __init__(self, samples, recovery, bins=32, range=None,
             transit_lnprob_function=None):
    # Make sure that the samples have the correct format.
    samples = np.atleast_2d(samples)
    # Compute the injection and recovery histograms on the same bin edges.
    img_all, self.bins = np.histogramdd(samples, bins=bins, range=range)
    img_yes, tmp = np.histogramdd(samples[recovery], bins=self.bins)
    self.setup()
    # Compute the completeness, asserting zero completeness where there
    # were no injections.
    lncompleteness = -np.inf + np.zeros(img_yes.shape, dtype=float)
    m = img_all > 0
    lncompleteness[m] = np.log(img_yes[m]) - np.log(img_all[m])
    # Compute the transit probability if a function was given.
    if transit_lnprob_function is None:
        lnprob = np.array(lncompleteness)
    else:
        args = np.meshgrid(*(self.bin_centers), indexing="ij")
        transit_lnprob = transit_lnprob_function(*args)
        lnprob = lncompleteness + transit_lnprob
    # Expand the completeness and probability grids to have zeros around
    # the edges.
    self.lncompleteness = -np.inf + np.zeros(np.array(img_yes.shape) + 2,
                                             dtype=float)
    self.lncompleteness[tuple([slice(1, -1)] * len(self.bins))] = lncompleteness
    self.lnprob = -np.inf + np.zeros(np.array(img_yes.shape) + 2,
                                     dtype=float)
    self.lnprob[tuple([slice(1, -1)] * len(self.bins))] = lnprob
Author: RuthAngus | Project: exopop | Lines: 34 | Source: population.py
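The detection-efficiency idea above, reduced to its core on toy data: histogram all injected samples, histogram the recovered subset on the same edges, and take the per-bin ratio (left at zero where nothing was injected). All names and numbers below are made up for illustration.

import numpy as np

rng = np.random.default_rng(8)
samples = rng.uniform(size=(5000, 2))         # injected parameters
recovered = rng.random(5000) < samples[:, 1]  # toy recovery flag

n_all, edges = np.histogramdd(samples, bins=16)
n_yes, _ = np.histogramdd(samples[recovered], bins=edges)
completeness = np.zeros_like(n_all)
good = n_all > 0
completeness[good] = n_yes[good] / n_all[good]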
Example 7: spline_fit
def spline_fit(data,
               bins=None,
               range=None,
               weights=None,
               order=None,
               filename='spline.fits'):
    # Bin the weights and the squared weights: the former gives the counts,
    # the latter a per-bin variance estimate.
    if bins is None:
        bins = data.shape[1] * [10]
        counts, bin_arrays = np.histogramdd(data, range=range, weights=weights)
        vars, bin_arrays = np.histogramdd(data, range=range, weights=weights**2)
    else:
        counts, bin_arrays = np.histogramdd(data, bins=bins, range=range, weights=weights)
        vars, bin_arrays = np.histogramdd(data, bins=bins, range=range, weights=weights**2)
    # Bin centers, one array per dimension.
    coords = [(b[1:] + b[:-1]) / 2. for b in bin_arrays]
    if order is None:
        order = list(np.zeros_like(bins))
    knots = pad_knots(bin_arrays, order)
    # Weight each bin by the inverse of its error; replace non-finite entries
    # (from empty bins) by the smallest finite weight.
    w = 1. / np.sqrt(vars)
    w[~np.isfinite(w)] = np.nanmin(w)
    result = glam.fit(counts, w, coords, knots, order, 0)
    if filename is not None:
        if os.path.exists(filename):
            os.remove(filename)
        if filename[-5:] == '.fits':
            splinefitstable.write(result, filename)
        else:
            splinefitstable.write(result, filename + '.fits')
    return result
Author: gabinder | Project: pyllh | Lines: 34 | Source: spline.py
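The weights and weights**2 trick used by spline_fit, stripped of the glam/splinefitstable dependencies: binning the weights gives the per-bin weighted count, binning the squared weights gives the per-bin sum of squares, whose square root is the usual Poisson-style error on a weighted bin. Synthetic data only.

import numpy as np

rng = np.random.default_rng(4)
data = rng.normal(size=(1000, 2))
weights = rng.exponential(size=1000)

counts, edges = np.histogramdd(data, bins=10, weights=weights)
sq_sums, _ = np.histogramdd(data, bins=edges, weights=weights**2)
sigma = np.sqrt(sq_sums)                      # error estimate per bin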
Example 8: transfer_entropy
def transfer_entropy(ts1, ts2, lag=2, bins=5):
    """ D_1<-2 """
    ts1, lts1 = multi_lag(ts1, lag)
    ts2, lts2 = multi_lag(ts2, lag)
    # P(i_n+1, i_(n), j_(n))
    joint = np.histogramdd([ts1] + lts1 + lts2, bins=bins)[0]
    joint = normalize(joint)
    # P(i_n+1, i_(n))
    auto = np.histogramdd([ts1] + lts1, bins=bins)[0]
    auto = normalize(auto)
    # P(i_(n))
    lag1 = np.histogramdd(lts1, bins=bins)[0]
    lag1 = normalize(lag1)
    # P(i_(n), j_(n))
    lag12 = np.histogramdd(lts1 + lts2, bins=bins)[0]
    lag12 = normalize(lag12)
    # P(i_n+1 | i_(n), j_(n))
    jcond = np.divide(joint.T, lag12.T).T
    jcond = clean(jcond)
    jcond = do_cpdf(jcond.T, avg_zeros).T
    # P(i_n+1 | i_(n))
    acond = np.divide(auto.T, lag1.T).T
    acond = clean(acond)
    acond = do_cpdf(acond.T, avg_zeros).T
    # E[log P(i_n+1 | i_(n), j_(n)) / P(i_n+1 | i_(n))]
    transfer = joint * clean(np.log(np.divide(jcond, acond)))
    return transfer.sum()
Author: caffeine-xx | Project: pyglpp | Lines: 28 | Source: information.py
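The normalize/clean/do_cpdf helpers above are project-specific, but the underlying step is turning raw histogramdd counts into an empirical joint probability table and a conditional; a minimal standalone sketch on synthetic series:

import numpy as np

rng = np.random.default_rng(5)
x = rng.normal(size=2000)
y = 0.5 * x + rng.normal(scale=0.5, size=2000)

counts, _ = np.histogramdd([x, y], bins=5)
joint_pmf = counts / counts.sum()             # P(x_bin, y_bin)
marginal_x = joint_pmf.sum(axis=1)            # P(x_bin)
with np.errstate(invalid='ignore', divide='ignore'):
    cond = joint_pmf / marginal_x[:, None]    # P(y_bin | x_bin)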
Example 9: MeasureColorVector
def MeasureColorVector(self, img, HistSizes):
    """ Returns a color vector obtained by histogramming the color channels,
    with the number of bins given in HistSizes, for five image regions. """
    ImgX, ImgY, ImgZ = img.shape
    # First define boolean masks for the partitions of the image.
    Mask0 = np.zeros((ImgX, ImgY), dtype=bool)
    Mask0[ImgX // 3:2 * ImgX // 3, ImgY // 3:2 * ImgY // 3] = True  # Central 1/3 region rectangle
    Vector0 = np.histogramdd(img[Mask0], bins=HistSizes, density=True)[0]
    Mask1 = np.zeros((ImgX, ImgY), dtype=bool)
    Mask1[0:ImgX // 2, 0:ImgY // 2] = True      # 1st quadrant
    Mask1 &= ~Mask0                             # minus the central region
    Vector1 = np.histogramdd(img[Mask1], bins=HistSizes, density=True)[0]
    Mask2 = np.zeros((ImgX, ImgY), dtype=bool)
    Mask2[ImgX // 2:, 0:ImgY // 2] = True       # 2nd quadrant
    Mask2 &= ~Mask0
    Vector2 = np.histogramdd(img[Mask2], bins=HistSizes, density=True)[0]
    Mask3 = np.zeros((ImgX, ImgY), dtype=bool)
    Mask3[0:ImgX // 2, ImgY // 2:] = True       # 3rd quadrant
    Mask3 &= ~Mask0
    Vector3 = np.histogramdd(img[Mask3], bins=HistSizes, density=True)[0]
    Mask4 = np.zeros((ImgX, ImgY), dtype=bool)
    Mask4[ImgX // 2:, ImgY // 2:] = True        # 4th quadrant
    Mask4 &= ~Mask0
    Vector4 = np.histogramdd(img[Mask4], bins=HistSizes, density=True)[0]
    # img[mask] gives an (n_pixels, n_channels) array, so each Vector is a
    # multidimensional color histogram; flatten and concatenate them.
    return np.concatenate([v.ravel() for v in (Vector0, Vector1, Vector2, Vector3, Vector4)])
Author: indiajoe | Project: PhotographyScripts | Lines: 31 | Source: GroupImages.py
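The same region-histogram idea as a standalone sketch on a fake image: a boolean mask selects the pixels of a region, img[mask] yields an (n_pixels, 3) array of RGB triples, and histogramdd bins them into a 3-D colour histogram that is flattened into a feature vector. Sizes and bin counts are arbitrary.

import numpy as np

rng = np.random.default_rng(6)
img = rng.integers(0, 256, size=(120, 160, 3))     # fake RGB image
hist_sizes = (4, 4, 4)                              # bins per channel

mask = np.zeros(img.shape[:2], dtype=bool)
mask[40:80, 53:107] = True                          # central region
pixels = img[mask]                                  # (n_pixels, 3)
vec, _ = np.histogramdd(pixels, bins=hist_sizes, range=[(0, 256)] * 3, density=True)
feature = vec.ravel()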
Example 10: Get_2DTProfile
def Get_2DTProfile(ar1, ar2, ar3, nbBinsX, nbBinsY, we):
    '''
    Weighted 2D profile of ar3 over the (ar1, ar2) plane: per-bin mean,
    error on the mean, and number of entries.
    '''
    d = numpy.column_stack((ar1, ar2, ar3))
    number, axis = numpy.histogramdd(d, (nbBinsX, nbBinsY, 1))
    weight, axis = numpy.histogramdd(d, (nbBinsX, nbBinsY, 1), weights=we)
    mean,   axis = numpy.histogramdd(d, (nbBinsX, nbBinsY, 1), weights=we * ar3)
    err,    axis = numpy.histogramdd(d, (nbBinsX, nbBinsY, 1), weights=we * (ar3**2.))
    mean /= weight
    err = numpy.sqrt((err / weight - mean**2.) / number)
    mean = mean[:, :, 0]
    err = err[:, :, 0]
    number = number[:, :, 0]
    ### find the axis X
    #axisX = axis[0]
    #axisX = numpy.array([ axisX[i]+(axisX[i+1]-axisX[i])/2. for i in range(0,axisX.size-1) ])
    ### find the axis Y
    #axisY = axis[1]
    #axisY = numpy.array([ axisY[i]+(axisY[i+1]-axisY[i])/2. for i in range(0,axisY.size-1) ])
    ### For a test, look at the histogram
    #plt.imshow(mean,origin='lower',extent=[0., 10., 0., 10.],interpolation='None')
    #cbar = plt.colorbar()
    #plt.show()
    return mean, err, number
Author: londumas | Project: CrossCorrelation | Lines: 30 | Source: myTools.py
Example 11: chi2div
def chi2div(y, x, idx1, idx2, edges):
    # Histogram the two subsamples on the same bin edges so they are comparable.
    n1, edges1 = np.histogramdd([y[idx1], x[idx1]], bins=edges)
    n2, edges2 = np.histogramdd([y[idx2], x[idx2]], bins=edges)
    # Chi-square-style divergence between the two binned samples.
    chi2d = np.sum(len(idx1) * pow(n1 - n2, 2) / (2 * n2 * (len(idx1) - n2)))
    #chi2 = m*(n1 - n2).^2./(2*n2.*(m-n2));
    return chi2d
Author: Anastasia874 | Project: SampleSize | Lines: 8 | Source: calcSampleSize.py
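Not the exact statistic computed above (which normalises by the subsample sizes), but the same shared-edges pattern with a symmetric chi-square distance, runnable on its own:

import numpy as np

rng = np.random.default_rng(7)
a = rng.normal(size=(1000, 2))
b = rng.normal(loc=0.3, size=(1000, 2))

h1, edges = np.histogramdd(a, bins=8)
h2, _ = np.histogramdd(b, bins=edges)          # same edges -> comparable bins
denom = h1 + h2
nonzero = denom > 0
chi2 = ((h1[nonzero] - h2[nonzero]) ** 2 / denom[nonzero]).sum()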
Example 12: bin_by_mean
def bin_by_mean(lon, lat, z, bins=10, range=None):
    # Same binned-mean pattern as above: per-cell sum of z over per-cell counts.
    bins = bins[::-1]
    range = range[::-1]
    wsum, _ = np.histogramdd((lat, lon), weights=z, bins=bins, range=range)
    ppbin, edges = np.histogramdd((lat, lon), bins=bins, range=range)
    #ppbin[ppbin==0] = np.nan
    #ppbin = np.ma.masked_equal(ppbin, 0)
    return (wsum / ppbin), ppbin, edges[1], edges[0]
Author: fspaolo | Project: code | Lines: 8 | Source: funcs.py
Example 13: test_filter_minmax
def test_filter_minmax(self):
    """
    Check histogramnd with weight_min/weight_max filtering against
    np.histogramdd applied to the pre-filtered sample.
    """
    result_c = histogramnd(self.sample,
                           self.histo_range,
                           self.n_bins,
                           weights=self.weights,
                           last_bin_closed=True,
                           weight_min=self.filter_min,
                           weight_max=self.filter_max)
    # Cast the filter bounds to the weights dtype before selecting.
    filter_min = self.dtype_weights(self.filter_min)
    filter_max = self.dtype_weights(self.filter_max)
    weight_idx = _get_in_range_indices(self.weights,
                                       filter_min,
                                       filter_max,
                                       minop=operator.ge,
                                       maxop=operator.le)
    result_np = np.histogramdd(self.sample[weight_idx],
                               bins=self.n_bins,
                               range=self.histo_range)
    result_np_w = np.histogramdd(self.sample[weight_idx],
                                 bins=self.n_bins,
                                 range=self.histo_range,
                                 weights=self.weights[weight_idx])
    # comparing "hits"
    hits_cmp = np.array_equal(result_c[0],
                              result_np[0])
    # comparing weights
    weights_cmp = np.array_equal(result_c[1], result_np_w[0])
    self.assertTrue(hits_cmp)
    self.assertTrue(weights_cmp)
    bins_min = [rng[0] for rng in self.histo_range]
    bins_max = [rng[1] for rng in self.histo_range]
    inrange_idx = _get_in_range_indices(self.sample[weight_idx],
                                        bins_min,
                                        bins_max,
                                        minop=operator.ge,
                                        maxop=operator.le)
    inrange_idx = weight_idx[inrange_idx]
    self.assertEqual(result_c[0].sum(), len(inrange_idx),
                     msg=self.state_msg)
    # we have to sum the weights using the same precision as the
    # histogramnd function
    weights_sum = self.weights[inrange_idx].astype(result_c[1].dtype).sum()
    self.assertTrue(self.array_compare(result_c[1].sum(), weights_sum),
                    msg=self.state_msg)
Author: CaptainNemoz | Project: silx | Lines: 58 | Source: test_histogramnd_vs_np.py
Example 14: feature_dist
def feature_dist(self):
    # Histogram the particle and halo feature vectors on the same bin edges.
    pcounts, e = np.histogramdd(self.pfeatures.view((np.float64, len(self.pfeatures.dtype.names))),
                                bins=self.edges[:-1])
    hcounts, e = np.histogramdd(self.hfeatures.view((np.float64, len(self.hfeatures.dtype.names))),
                                bins=self.edges[:-1])
    # The probability that a halo is present at a particle whose features fall
    # in a particular bin of feature space is the number of halos in that bin
    # divided by the number of particles in that bin.
    self.php = hcounts / pcounts
Author: j-dr | Project: ADDHALOS | Lines: 10 | Source: model.py
Example 15: test_inf_edges
def test_inf_edges(self):
    """Test that using +/-inf bin edges works as catch-all bins. See #1788."""
    x = np.arange(6).reshape(3, 2)
    expected = np.array([[1, 0], [0, 1], [0, 1]])
    h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
    assert_allclose(h, expected)
    h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
    assert_allclose(h, expected)
    h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
    assert_allclose(h, expected)
Author: AJMartel | Project: 3DPrinter | Lines: 10 | Source: test_function_base.py
Example 16: plot_lum
def plot_lum(self):
    #print('reading file')
    #col = rdcol.read('./catalogs/fg_lum_cat/lum_catalog.dat',1,2)
    print('rounding')
    # Convert redshift to an integer index into the precomputed distance table.
    self.z_li = np.round(self.z_l, 3) * 1000 - 100
    self.z_li = self.z_li.astype(int)
    print('size of self.tmag_l ' + str(self.tmag_l.shape))
    print('size of self.z_l ' + str(self.z_li.shape))
    print('self.z_l')
    print(self.z_l[0:50])
    print('self.z_li')
    print(self.z_li[0:50])
    print('self.distances[z_l]')
    print(self.distances[self.z_li[0:50]])
    self.absmag = 5.0 + self.tmag_l - 5.0 * np.log10(self.distances[self.z_li])
    self.lum_l = 10**((4.65 - self.absmag) / 2.5)  # AB system, SDSS r-band solar absolute magnitude
    #self.lum_l = np.asarray(col['luminosity'])
    #self.dec_l = np.asarray(col['dec'])
    #self.ra_l = np.asarray(col['ra'])
    self.lum_l = self.lum_l - np.mean(self.lum_l.ravel())  # Subtract out the mean
    self.kappa = self.kappa - np.mean(self.kappa.ravel())
    print('pixelizing')
    # Pixelize the catalog: weighted histograms give per-pixel sums, and the
    # unweighted histogram (self.grid) gives the per-pixel counts used to
    # turn those sums into means.
    self.lum2dw, edges = np.histogramdd(np.array([self.dec_l, self.ra_l]).T,
                                        bins=(self.bin_dec, self.bin_ra),
                                        weights=self.lum_l)
    self.grid, edges = np.histogramdd(np.array([self.dec_l, self.ra_l]).T,
                                      bins=(self.bin_dec, self.bin_ra))
    self.lum2d = self.lum2dw / self.grid
    #self.tmag2d, edges = np.histogramdd(np.array([self.dec_l,self.ra_l]).T,
    #                                    bins=(self.bin_dec, self.bin_ra),
    #                                    weights=self.tmag_l)
    self.kappa2dw, edges = np.histogramdd(np.array([self.dec_l, self.ra_l]).T,
                                          bins=(self.bin_dec, self.bin_ra),
                                          weights=self.kappa)
    self.kappa2d = self.kappa2dw / self.grid
    self.mass2d = self.get_mass_from_kappa()
    self.mass2d = self.mass2d - np.mean(self.mass2d.ravel())
    print('saving')
    self.save_fits_image(self.lum2d, '/home/dbrout/bccml/maps/luminosity/lum_density' + str(self.file_root) + '.fits')
    #self.save_fits_image(self.tmag2d,'/home/dbrout/bccml/maps/luminosity/mag_density.fits')
    self.save_fits_image(self.kappa2d, '/home/dbrout/bccml/maps/luminosity/kappa_density' + str(self.file_root) + '.fits')
    self.save_fits_image(self.mass2d, '/home/dbrout/bccml/maps/luminosity/mass' + str(self.file_root) + '.fits')
    print('plotting 3')
    #plt.figure()
    #n, bins, patches = plt.hist(self.lum2d[(self.lum2d < .4*10**10) & (self.lum2d > 1001.0)],100,log=True, histtype='bar')
    #plt.xlabel('Solar Luminosity')
    #plt.ylabel('# of Pixels')
    #print('saving')
    #plt.savefig('/home/dbrout/bccml/maps/luminosity/lum_hist.png')
    return
Author: djbrout | Project: bccml | Lines: 55 | Source: makelightmapv2.py
Example 17: test_inf_edges
def test_inf_edges(self):
    # Test that using +/-inf bin edges works. See #1788.
    with np.errstate(invalid='ignore'):
        x = np.arange(6).reshape(3, 2)
        expected = np.array([[1, 0], [0, 1], [0, 1]])
        h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
        assert_allclose(h, expected)
        h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
        assert_allclose(h, expected)
        h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
        assert_allclose(h, expected)
Author: dpritsos | Project: DoGSWrapper | Lines: 11 | Source: test_histograms.py
Example 18: mopso
def mopso(f, in_dimensions, pop_size, repo_size, grid_partitions=10, w=0.4):
    # Random initial population inside the (module-level) bounds.
    population = np.array([[np.random.random() * (upper_bound - lower_bound) + lower_bound
                            for (upper_bound, lower_bound) in zip(upper_bounds, lower_bounds)]
                           for _ in range(pop_size)])
    population_velocity = np.zeros((pop_size, in_dimensions))
    objectives = np.apply_along_axis(g, 1, population)
    repository, repository_objectives = get_champions(population, objectives)
    best_historical_individuals = np.copy(population)
    best_historical_objectives = np.copy(objectives)
    w = 0.4
    # Bin edges of the coarse grid used to estimate how crowded each region is.
    bins = np.array([np.linspace(lower_bounds[i], upper_bounds[i], num=grid_partitions + 1)
                     for i in range(in_dimensions)])
    for _ in range(iterations):
        print("Iteration %d" % _)
        # Count individuals per grid cell, then score each individual by the
        # inverse of its cell occupancy ("loneliness").
        coarse_map = np.histogramdd(population, bins=bins)[0]
        population_loneliness = 1.0 / np.array(
            [coarse_map[tuple(np.array([min(max(np.digitize(np.array([coordinate]), bin)[0], 0), grid_partitions)
                                        for coordinate, bin in zip(population[i], bins)]) - 1)]
             for i in range(len(population))])
        bests = np.random.choice(pop_size, pop_size, p=population_loneliness / np.sum(population_loneliness))
        population_velocity = (w * population_velocity
                               + np.random.random(pop_size)[:, np.newaxis] * (best_historical_individuals - population)
                               + np.random.random(pop_size)[:, np.newaxis] * (population[bests] - population))
        population = np.clip(population + population_velocity, lower_bounds, upper_bounds)
        objectives = np.apply_along_axis(g, 1, population)
        champions, champions_objectives = get_champions(population, objectives)
        repository, repository_objectives = get_champions(repository + champions,
                                                          repository_objectives + champions_objectives)
        if len(repository) > repo_size:
            # Thin the repository, keeping the least crowded individuals.
            coarse_map = np.histogramdd(population, bins=bins)[0]
            population_loneliness = 1.0 / np.array(
                [coarse_map[tuple(np.array([min(max(np.digitize(np.array([coordinate]), bin)[0], 0), grid_partitions)
                                            for coordinate, bin in zip(population[i], bins)]) - 1)]
                 for i in range(len(population))])
            surviving_indices = np.argsort(population_loneliness)[:repo_size]
            repository = [repository[i] for i in surviving_indices]
            repository_objectives = [repository_objectives[i] for i in surviving_indices]
        for i in range(len(population)):
            dominance_ = dominance(objectives[i], best_historical_objectives[i])
            if dominance_ == 1 or (dominance_ == 0 and np.random.random() > 0.5):
                best_historical_individuals[i] = population[i]
                best_historical_objectives[i] = objectives[i]
        # pl.clf()
        # pl.plot(np.array(repository_objectives)[:, 0], np.array(repository_objectives)[:, 1], "o")
        # pl.pause(0.001)
    return (np.array(repository), np.array(repository_objectives))
Author: nmiranda | Project: eobli | Lines: 53 | Source: mopso.py
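The crowding measure used twice above, in isolation: histogramdd counts individuals per grid cell, np.digitize maps each individual back to its cell, and the inverse of the occupancy serves as a loneliness score. Bounds and sizes below are illustrative.

import numpy as np

rng = np.random.default_rng(9)
pop = rng.random((200, 2))                     # 200 points in the unit square
grid_partitions = 10
bins = [np.linspace(0.0, 1.0, grid_partitions + 1)] * 2

counts = np.histogramdd(pop, bins=bins)[0]
cell = np.stack([np.clip(np.digitize(pop[:, d], bins[d]) - 1, 0, grid_partitions - 1)
                 for d in range(2)], axis=1)
loneliness = 1.0 / counts[cell[:, 0], cell[:, 1]]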
Example 19: test_reuse_cumul_float
def test_reuse_cumul_float(self):
    """
    Check that histogramnd can accumulate into a preallocated float32 array
    and that the result matches np.histogramdd.
    """
    n_bins = np.array(self.n_bins, ndmin=1)
    if len(self.sample.shape) == 2:
        if len(n_bins) == self.sample.shape[1]:
            shp = tuple([x for x in n_bins])
        else:
            shp = (self.n_bins,) * self.sample.shape[1]
        cumul = np.zeros(shp, dtype=np.float32)
    else:
        shp = (self.n_bins,)
        cumul = np.zeros(shp, dtype=np.float32)
    result_c_1 = histogramnd(self.sample,
                             self.histo_range,
                             self.n_bins,
                             weights=self.weights,
                             last_bin_closed=True,
                             weighted_histo=cumul)
    result_np_1 = np.histogramdd(self.sample,
                                 bins=self.n_bins,
                                 range=self.histo_range)
    result_np_w_1 = np.histogramdd(self.sample,
                                   bins=self.n_bins,
                                   range=self.histo_range,
                                   weights=self.weights)
    # comparing "hits"
    hits_cmp = np.array_equal(result_c_1[0],
                              result_np_1[0])
    self.assertTrue(hits_cmp, msg=self.state_msg)
    self.assertEqual(result_c_1[1].dtype, np.float32, msg=self.state_msg)
    bins_min = [rng[0] for rng in self.histo_range]
    bins_max = [rng[1] for rng in self.histo_range]
    inrange_idx = _get_in_range_indices(self.sample,
                                        bins_min,
                                        bins_max,
                                        minop=operator.ge,
                                        maxop=operator.le)
    weights_sum = \
        self.weights[inrange_idx].astype(np.float32).sum(dtype=np.float64)
    self.assertTrue(np.allclose(result_c_1[1].sum(dtype=np.float64),
                                weights_sum), msg=self.state_msg)
    self.assertTrue(np.allclose(result_c_1[1].sum(dtype=np.float64),
                                result_np_w_1[0].sum(dtype=np.float64)),
                    msg=self.state_msg)
Author: CaptainNemoz | Project: silx | Lines: 52 | Source: test_histogramnd_vs_np.py
Example 20: bhattacharyya_coefficient_discrete
def bhattacharyya_coefficient_discrete(data1, data2, bins=10):
    '''
    Computes the Bhattacharyya coefficient using (multidimensional)
    histograms over the joint range of the two data sets.
    '''
    # Per-dimension (min, max) range covering both data sets.
    hist_range = list(zip(np.minimum(np.min(data1, axis=0), np.min(data2, axis=0)),
                          np.maximum(np.max(data1, axis=0), np.max(data2, axis=0))))
    bins_total_volume = np.prod([ma - mi for mi, ma in hist_range])
    hist1, _ = np.histogramdd(data1, bins=bins, range=hist_range, density=True)
    hist2, _ = np.histogramdd(data2, bins=bins, range=hist_range, density=True)
    # mean(sqrt(p1*p2)) over the cells, times the total volume, equals the
    # sum of sqrt(P1_i * P2_i) over the discrete bins.
    return np.mean(np.sqrt(hist1 * hist2)) * bins_total_volume
Author: kjohnsson | Project: modality | Lines: 13 | Source: ClustData.py
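A rough sanity check of the function above (assuming the Python 3 fixes applied to it): two samples drawn from the same distribution should give a coefficient close to 1, two well-separated samples a value close to 0.

import numpy as np

rng = np.random.default_rng(10)
same_a = rng.normal(size=(5000, 2))
same_b = rng.normal(size=(5000, 2))
far = rng.normal(loc=8.0, size=(5000, 2))

print(bhattacharyya_coefficient_discrete(same_a, same_b))   # close to 1
print(bhattacharyya_coefficient_discrete(same_a, far))      # close to 0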
Note: The numpy.histogramdd examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.