本文整理汇总了Python中numpy.ceil函数的典型用法代码示例。如果您正苦于以下问题:Python ceil函数的具体用法?Python ceil怎么用?Python ceil使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ceil函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
    """Set up a shuffled sequential batch iterator over a dataset.

    Parameters
    ----------
    dataset_size : int
        Total number of examples in the dataset.
    batch_size : int or None
        Number of examples per batch; if None it is derived from
        `num_batches`.
    num_batches : int or None
        Number of batches; if None it is derived from `batch_size`.
        At least one of `batch_size` / `num_batches` must be given.
    rng : optional
        Seed or RNG object accepted by `make_np_rng`.
    """
    self._rng = make_np_rng(rng, which_method=["random_integers",
                                               "shuffle"])
    assert num_batches is None or num_batches >= 0
    self._dataset_size = dataset_size
    if batch_size is None:
        if num_batches is not None:
            # Exact integer ceiling division: correct under both Python 2
            # (where `/` on ints floors *before* np.ceil can round up) and
            # Python 3, and free of float rounding error for large sizes.
            batch_size = -(-self._dataset_size // num_batches)
        else:
            raise ValueError("need one of batch_size, num_batches "
                             "for sequential batch iteration")
    elif batch_size is not None:
        if num_batches is not None:
            max_num_batches = -(-self._dataset_size // batch_size)
            if num_batches > max_num_batches:
                raise ValueError("dataset of %d examples can only provide "
                                 "%d batches with batch_size %d, but %d "
                                 "batches were requested" %
                                 (self._dataset_size, max_num_batches,
                                  batch_size, num_batches))
        else:
            num_batches = -(-self._dataset_size // batch_size)
    self._batch_size = batch_size
    self._num_batches = int(num_batches)
    self._next_batch_no = 0
    self._idx = 0
    # Batches are visited in a shuffled order.
    self._batch_order = list(range(self._num_batches))
    self._rng.shuffle(self._batch_order)
开发者ID:dwf,项目名称:pylearn2,代码行数:29,代码来源:iteration.py
示例2: max_lm
def max_lm(baselines, wavelengths, uwidth, vwidth=0.0):
    """Get the maximum (l,m) that a baseline is sensitive to.

    Parameters
    ----------
    baselines : np.ndarray
        An array of baselines (columns 0/1 are the u/v components).
    wavelengths : np.ndarray
        An array of wavelengths.
    uwidth : np.ndarray
        Width of the receiver in the u-direction.
    vwidth : np.ndarray
        Width of the receiver in the v-direction.

    Returns
    -------
    lmax, mmax : array_like
    """
    # Maximum |u| and |v| extents in wavelengths, padded by receiver size.
    u_extent = (np.abs(baselines[:, 0]) + uwidth) / wavelengths
    v_extent = (np.abs(baselines[:, 1]) + vwidth) / wavelengths

    # m is set directly by the u extent; l combines m with the v extent.
    mmax = np.ceil(2 * np.pi * u_extent).astype(np.int64)
    lmax = np.ceil((mmax**2 + (2 * np.pi * v_extent)**2)**0.5).astype(np.int64)

    return lmax, mmax
开发者ID:TianlaiProject,项目名称:tlpipe,代码行数:26,代码来源:telescope.py
示例3: _filter_ridge_lines
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
-------------
cwt : 2-D ndarray
Continuous wavelet transform from which
the ridge_lines were defined
ridge_lines: 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively)
window_size: int, optional
Size of window to use to calculate noise floor.
Default is `cwt`.shape[1]/20
min_length: int, optional
Minimum length a ridge line needs to be acceptable.
Default is `cwt`.shape[0]/4, ie 1/4th the number of widths.
min_snr: float, optional
Minimum SNR ratio. Default 0. The signal is the value of
the cwt matrix at the shortest length scale (`cwt`[0,loc]), the noise is
the `noise_perc`th percentile of datapoints contained within
a window of `window_size` around `cwt`[0,loc]
noise_perc: float,optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
hf_window = window_size / 2
#Filter based on SNR
row_one = cwt[0, :]
noises = np.zeros_like(row_one)
for ind, val in enumerate(row_one):
window = np.arange(max([ind - hf_window, 0]), min([ind + hf_window, num_points]))
window = window.astype(int)
noises[ind] = scoreatpercentile(row_one[window], per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return filter(filt_func, ridge_lines)
开发者ID:AmitAronovitch,项目名称:scipy,代码行数:60,代码来源:_peak_finding.py
示例4: setupFakePulsar
def setupFakePulsar(nodes=range(1, 9), fpgaclk=360e6, frqs=cfs, sideband=-1):
    """Configure VEGAS nodes for a fake-pulsar observation.

    NOTE(review): relies on the module-level ``vsd`` node map and the
    ``cfs`` default frequency list, neither of which is visible here.
    """
    node_idx = np.arange(8)
    clk = fpgaclk
    if frqs is None:
        # Offset that snaps the 150 MHz edge onto the coarse channel grid.
        grid_off = np.ceil(150e6 / (clk * 4 / 1024.0)) * clk * 4 / 1024.0
        frqs = (18e9
                - grid_off
                + (clk * 2) * (2 * node_idx + 1)
                - grid_off * node_idx)
    freq_by_node = dict(zip(node_idx + 1, frqs))
    sample_rate = fpgaclk * 8  # effective sample rate
    chan_bw = sideband * sample_rate / (2 * 1024.0)  # signed PFB channel width
    for node in nodes:
        # EXPOSURE ~0 requests every single spectrum.
        vsd[node].setParams(
            EFSAMPFR=sample_rate,
            NCHAN=1024,
            EXPOSURE=1e-6,
            SUB0FREQ=freq_by_node[node],
            OBSFREQ=freq_by_node[node],
            CHAN_BW=chan_bw,
            FPGACLK=fpgaclk,
        )
开发者ID:gitj,项目名称:vegas_devel,代码行数:26,代码来源:vtest.py
示例5: affine_grid
def affine_grid(self,Hz,rhoz,Lam):
    """
    Get data on regular spatial grid

    Interpolates input sampled on the redshift grid ``self.z`` onto a
    uniform grid in the affine parameter ``v``.

    Parameters (NOTE(review): inferred from usage -- confirm):
        Hz   : Hubble-rate samples H(z) on self.z
        rhoz : density samples rho(z) on self.z
        Lam  : cosmological constant

    Returns
        v, vzo, H, rho, u, NJ, NI, delv, Om0, OL0, Ok0, t0
    """
    # First find dimensionless density parameters at z=0
    Om0 = 8*pi*rhoz[0]/(3*Hz[0]**2)
    OL0 = Lam/(3*Hz[0]**2)
    Ok0 = 1-Om0-OL0  # curvature from the closure relation
    # Get the age of the universe t0
    t0 = self.get_age(Om0,Ok0,OL0,Hz[0])
    # Set affine parameter values: spline dv/dz, then integrate for v(z)
    dvo = uvs(self.z,1/(self.uz**2*Hz),k=3,s=0.0)
    vzo = dvo.antiderivative()
    vz = vzo(self.z)
    vz[0] = 0.0  # pin the integration constant so v(0) = 0
    # Compute grid sizes that give a numerical error of order self.err
    NJ = int(ceil(vz[-1]/sqrt(self.err) + 1))
    NI = int(ceil(3.0*(NJ - 1)*(t0 - self.tmin)/vz[-1] + 1))
    # Resample the functions on the regular v grid
    v = linspace(0,vz[-1],NJ)
    delv = (v[-1] - v[0])/(NJ-1)
    if delv > sqrt(self.err):
        # Warn that the grid spacing exceeds the error target
        # (Python 2 print statement -- this module targets Python 2).
        print 'delv > sqrt(err)'
    Ho = uvs(vz,Hz,s=0.0,k=3)
    H = Ho(v)
    rhoo = uvs(vz,rhoz,s=0.0,k=3)
    rho = rhoo(v)
    uo = uvs(vz,self.uz,s=0.0,k=3)
    u = uo(v)
    u[0] = 1.0  # normalisation at the origin
    return v,vzo,H,rho,u,NJ,NI,delv,Om0,OL0,Ok0,t0
开发者ID:b2themax,项目名称:Copernicus,代码行数:31,代码来源:CIVPSimp.py
示例6: qwtCanvasClip
def qwtCanvasClip(canvas, canvasRect):
    """Return the canvas border path for the whole-pixel interior of
    `canvasRect` (left/top rounded up, right/bottom rounded down)."""
    left = np.ceil(canvasRect.left())
    right = np.floor(canvasRect.right())
    top = np.ceil(canvasRect.top())
    bottom = np.floor(canvasRect.bottom())
    clip_rect = QRect(left, top, right - left - 1, bottom - top - 1)
    return canvas.borderPath(clip_rect)
开发者ID:petebachant,项目名称:python-qwt,代码行数:7,代码来源:plot_renderer.py
示例7: each_SASA
def each_SASA(sasas, sort_keys, kcat_cut=30, plot=True, meta=None):
    """Plot a grid of per-simulation SASA time series, colour-coded by the
    activity label from `label_maker`.

    Parameters
    ----------
    sasas : dict
        SASA data keyed by simulation name; each entry holds 'name' and
        'base_sasa' (per-residue 'sasa_vals' series).
    sort_keys : list
        Simulation names, in display order.
    kcat_cut : number
        Activity threshold forwarded to `label_maker`.
    plot : bool
        Show interactively if True, otherwise save via `picturesave`.
    meta : dict or None
        Metadata dict updated (or created) with plot parameters.
    """
    num_sims = len(sort_keys)
    labels = label_maker(sasas, kcat_cut=kcat_cut, name_list=sort_keys)
    base_size = 20.
    wide_factor = 1.5
    color_dict = {True: 'r', False: 'g', 'maybe': 'b', 'wt': 'm'}
    # Near-square subplot grid.
    ncols = int(np.ceil(np.sqrt(num_sims)))
    nrows = int(np.ceil(float(num_sims) / ncols))
    fig = plt.figure(figsize=(base_size, base_size * (float(nrows) / ncols) / wide_factor))
    gs = gridspec.GridSpec(nrows, ncols, hspace=0.65, wspace=0.8)
    # BUG FIX: integer division for the row index -- plain `/` yields a
    # float under Python 3 and breaks gridspec indexing.
    axes = [plt.subplot(gs[plot_num // ncols, plot_num % ncols])
            for plot_num in range(num_sims)]
    max_SASA = 0
    ts_scaling = 0.02  # assumed frame-to-time conversion -- TODO confirm units
    for plot_num, ax in enumerate(axes):
        SASA = sasas[sort_keys[plot_num]]
        # Total SASA: sum the per-residue series.
        ts_sasa = np.sum([SASA['base_sasa'][res]['sasa_vals']
                          for res in SASA['base_sasa']], axis=0)
        name = SASA['name']
        activity = labels[plot_num]
        ts = np.array(range(len(ts_sasa))) * ts_scaling
        ax.plot(ts, ts_sasa, color=color_dict[activity])
        ax.set_title(name)
        ax.tick_params(axis='y', which='both', left='off', right='off', labelleft='on')
        ax.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='on')
        max_SASA = max(max_SASA, max(ts_sasa))
    min_SASA = 0
    if meta:
        meta['kcat cut'] = kcat_cut
        # BUG FIX: was `max_sasa` (undefined name, NameError) -- use the
        # accumulated maximum computed above.
        meta['max sasa'] = max_SASA
        meta['ts scaling'] = ts_scaling
    else:
        meta = {'kcat cut': kcat_cut, 'max sasa': max_SASA, 'ts scaling': ts_scaling}
    # Give every panel the same y-range for comparability.
    for plot_num, ax in enumerate(axes):
        ax.set_ylim(min_SASA, max_SASA)
    if plot:
        plt.show(block=False)
    else:
        picturesave('fig.each-%s' % plotname, work.plotdir, backup=False,
                    version=True, meta=meta)
开发者ID:ejjordan,项目名称:analyo,代码行数:34,代码来源:plot-sasa.py
示例8: return_unit_round_neighborhood
def return_unit_round_neighborhood(self, row, col, radius):
    """Return a list of (row, col, distance) tuples for the units inside a
    circular neighbourhood of a given unit.

    @param row the row index of the unit
    @param col the column index of the unit
    @param radius the radius of the distance to consider
    """
    if radius <= 0:
        # Degenerate radius: only the unit itself, at distance 0.
        return [(row, col, 0)]
    neighbours = list()
    # Bounding square of half-width ceil(radius), clamped to the matrix.
    span = int(np.ceil(radius))
    row_lo = max(row - span, 0)
    row_hi = min(row + span, self._matrix_size - 1)
    col_lo = max(col - span, 0)
    col_hi = min(col + span, self._matrix_size - 1)
    for r in range(row_lo, row_hi + 1):
        for c in range(col_lo, col_hi + 1):
            # Euclidean distance from the centre unit (Pythagoras).
            dist = np.sqrt(np.power(np.abs(col - c), 2) +
                           np.power(np.abs(row - r), 2))
            # Keep only units actually inside the circle.
            if dist <= radius:
                neighbours.append((r, c, dist))
    return neighbours
开发者ID:mpatacchiola,项目名称:pyERA,代码行数:33,代码来源:som.py
示例9: init_log_binned_fx_buckets
def init_log_binned_fx_buckets(self):
    """Populate ``self.refex_log_binned_buckets`` with vertical log-bin
    labels, one per vertex.

    With p = 0.5 each successive bin holds the bottom half of the
    remaining nodes; bin sizes are rounded up and the first (largest)
    bucket absorbs any over-allocation.

    Raises
    ------
    Exception
        If the total bucket size does not equal the number of vertices.
    """
    # Number of log bins; TOLERANCE guards against log2 landing exactly
    # on an integer.  (p = 0.5 was found to be a sensible choice.)
    max_fx_value = np.ceil(np.log2(self.no_of_vertices) + self.TOLERANCE)
    log_binned_fx_keys = list(range(int(max_fx_value)))
    fx_bucket_size = []
    remaining = self.no_of_vertices
    for _ in np.arange(0.0, max_fx_value):
        remaining *= self.p
        fx_bucket_size.append(int(np.ceil(remaining)))
    total_slots = sum(fx_bucket_size)
    # Rounding up can over-allocate; shrink the first bucket to compensate.
    if total_slots > self.no_of_vertices:
        fx_bucket_size[0] -= (total_slots - self.no_of_vertices)
    log_binned_buckets_dict = dict(zip(log_binned_fx_keys, fx_bucket_size))
    # BUG FIX: `range` instead of Python-2-only `xrange` (NameError on
    # Python 3); behaviour is identical.
    for binned_value in sorted(log_binned_buckets_dict.keys()):
        for _ in range(log_binned_buckets_dict[binned_value]):
            self.refex_log_binned_buckets.append(binned_value)
    if len(self.refex_log_binned_buckets) != self.no_of_vertices:
        raise Exception("Vertical binned bucket size not equal to the number of vertices!")
开发者ID:pratikgupte,项目名称:pyroles,代码行数:28,代码来源:features.py
示例10: _drawGraticules
def _drawGraticules(self, m, gd):
    """Draw one-degree lat/lon graticules on basemap `m` over the extent
    described by `gd`, restyle the labels, and replace the default axis
    ticks with outward marks at the graticule positions."""
    parallels = np.arange(np.ceil(gd.ymin), np.floor(gd.ymax) + 1, 1.0)
    meridians = np.arange(np.ceil(gd.xmin), np.floor(gd.xmax) + 1, 1.0)
    merdict = m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=10,
                              linewidth=0.5, color='gray',
                              zorder=GRATICULE_ZORDER)
    pardict = m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=10,
                              linewidth=0.5, color='gray',
                              zorder=GRATICULE_ZORDER)
    # Enlarge the first label of every meridian/parallel and collect the
    # corresponding tick positions.
    xticks = []
    for line, labellist in merdict.values():
        label = labellist[0]
        label.set_family('sans-serif')
        label.set_fontsize(12.0)
        xticks.append(line[0].get_xdata()[0])
    yticks = []
    for line, labellist in pardict.values():
        label = labellist[0]
        label.set_family('sans-serif')
        label.set_fontsize(12.0)
        yticks.append(line[0].get_ydata()[0])
    # Suppress the tick labels but keep outward tick marks.
    plt.xticks(xticks, ())
    plt.yticks(yticks, ())
    m.ax.tick_params(direction='out')
开发者ID:klin-usgs,项目名称:shakemap,代码行数:29,代码来源:mapmaker.py
示例11: sample_size_necessary_under_cph
def sample_size_necessary_under_cph(power, ratio_of_participants, p_exp, p_con,
                                    postulated_hazard_ratio, alpha=0.05):
    """
    Compute the sample sizes needed to compare two groups under a Cox
    Proportional Hazard model with the desired power.

    References:
      https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf

    Parameters:
      power: power to detect a hazard ratio as small as `postulated_hazard_ratio`.
      ratio_of_participants: ratio of experimental-group to control-group size.
      p_exp: probability of failure in the experimental group over the study.
      p_con: probability of failure in the control group over the study.
      postulated_hazard_ratio: the postulated hazard ratio.
      alpha: type I error rate.

    Returns:
      n_exp, n_con: sample sizes needed for the experimental and control
      groups, respectively, to achieve the desired power.
    """
    def z(p):
        return stats.norm.ppf(p)

    k = ratio_of_participants
    hr = postulated_hazard_ratio
    # Required number of events (Schoenfeld-style formula).
    m = (1.0 / k) \
        * ((k * hr + 1.0) / (hr - 1.0)) ** 2 \
        * (z(1. - alpha / 2.) + z(power)) ** 2
    # Convert events to participants via the overall failure probability.
    event_prob = k * p_exp + p_con
    n_exp = m * k / event_prob
    n_con = m / event_prob
    return int(np.ceil(n_exp)), int(np.ceil(n_con))
开发者ID:springcoil,项目名称:lifelines,代码行数:30,代码来源:statistics.py
示例12: create_mask
def create_mask(Nx, Ny, frac,
                rmin=0.5,
                rmax=2):
    """
    Create a mask of Nx by Ny pixels by punching random discs until at
    least `frac` of the pixels are covered (set to 0).

    frac: 0 <= frac <= 1: fraction of pixels to be covered
    rmin, rmax: disc radius is drawn uniformly from [rmin, rmax]
    """
    mask = numpy.ones((Nx, Ny))
    covered = 0
    goal = frac * Nx * Ny
    while covered < goal:
        # Draw the disc centre and radius; keep the draw order fixed so
        # results are reproducible under a seeded RNG.
        cx = Nx * numpy.random.random()
        cy = Ny * numpy.random.random()
        r = rmin + numpy.random.random() * (rmax - rmin)
        # Bounding box of the disc, clamped to the image.
        ix_lo = max(0, int(numpy.floor(cx - r)))
        ix_hi = min(Nx, int(numpy.ceil(cx + r)))
        iy_lo = max(0, int(numpy.floor(cy - r)))
        iy_hi = min(Ny, int(numpy.ceil(cy + r)))
        for ix in range(ix_lo, ix_hi):
            for iy in range(iy_lo, iy_hi):
                if (cx - ix) ** 2 + (cy - iy) ** 2 < r ** 2:
                    # Count a pixel only the first time it is covered.
                    covered += mask[ix, iy]
                    mask[ix, iy] = 0
    return mask
开发者ID:akr89,项目名称:Thesis,代码行数:29,代码来源:tools.py
示例13: dispims_color
def dispims_color(M, border=0, bordercolor=[0.0, 0.0, 0.0], savePath=None, *imshow_args, **imshow_keyargs):
    """ Display an array of rgb images.

    The input array is assumed to have the shape
    numimages x numpixelsY x numpixelsX x 3.

    NOTE(review): the mutable default `bordercolor=[...]` is kept for
    interface compatibility; it is never mutated (re-bound below).
    """
    bordercolor = numpy.array(bordercolor)[None, None, :]
    numimages = len(M)
    M = M.copy()
    # Normalise each image independently to [0, 1].
    for i in range(M.shape[0]):
        M[i] -= M[i].flatten().min()
        M[i] /= M[i].flatten().max()
    height, width, three = M[0].shape
    assert three == 3
    # BUG FIX: `numpy.int` was removed in NumPy 1.24 -- use the builtin.
    n0 = int(numpy.ceil(numpy.sqrt(numimages)))
    n1 = int(numpy.ceil(numpy.sqrt(numimages)))
    # Tile canvas, pre-filled with the border colour.
    im = numpy.array(bordercolor)*numpy.ones(
        ((height+border)*n1+border, (width+border)*n0+border, 1), dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i*n1+j < numimages:
                # Paste image (i*n1+j) with a border strip on its right
                # and bottom edges.
                im[j*(height+border)+border:(j+1)*(height+border)+border,
                   i*(width+border)+border:(i+1)*(width+border)+border, :] = numpy.concatenate((
                       numpy.concatenate((M[i*n1+j, :, :, :],
                                          bordercolor*numpy.ones((height, border, 3), dtype=float)), 1),
                       bordercolor*numpy.ones((border, width+border, 3), dtype=float)
                   ), 0)
    imshow_keyargs["interpolation"] = "nearest"
    pylab.imshow(im, *imshow_args, **imshow_keyargs)
    # PEP 8: identity comparison with None.
    if savePath is None:
        pylab.show()
    else:
        pylab.savefig(savePath)
开发者ID:TongZZZ,项目名称:ift6266h13,代码行数:34,代码来源:dispims.py
示例14: _scale_to_res
def _scale_to_res(self):
    """Change self._A and _extent to render an image whose
    resolution is matched to the eventual rendering."""
    ax = self.axes
    # Axes size in display (pixel) units.
    ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
    xlim, ylim = ax.get_xlim(), ax.get_ylim()
    dx, dy = xlim[1] - xlim[0], ylim[1] - ylim[0]
    # Visible data window, padded by 5 and clamped to the full-res array.
    y0 = max(self.miny, ylim[0] - 5)
    y1 = min(self._full_res.shape[0] + self.miny, ylim[1] + 5)
    x0 = max(self.minx, xlim[0] - 5)
    x1 = min(self._full_res.shape[1] + self.minx, xlim[1] + 5)
    y0, y1, x0, x1 = map(int, [y0, y1, x0, x1])
    # Decimation strides: roughly one sample per screen pixel, but never
    # coarser than a fifth of the visible window.
    sy = int(max(1, min((y1 - y0) / 5., np.ceil(dy / ext[1]))))
    sx = int(max(1, min((x1 - x0) / 5., np.ceil(dx / ext[0]))))
    # have we already calculated what we need?
    if sx == self._sx and sy == self._sy and \
            x0 == self._bounds[0] and x1 == self._bounds[1] and \
            y0 == self._bounds[2] and y1 == self._bounds[3]:
        return
    # Extract the decimated sub-array (offsets are relative to minx/miny).
    self._A = self._full_res[y0 - self.miny:y1 - self.miny:sy,
                             x0 - self.minx:x1 - self.minx:sx]
    # Snap the window edges to the decimated sample grid.
    x1 = x0 + self._A.shape[1] * sx
    y1 = y0 + self._A.shape[0] * sy
    # Half-pixel offsets centre the samples on their pixels.
    self.set_extent([x0 - .5, x1 - .5, y0 - .5, y1 - .5])
    self._sx = sx
    self._sy = sy
    self._bounds = (x0, x1, y0, y1)
    self.changed()
开发者ID:rkrug,项目名称:grass-ci,代码行数:35,代码来源:plots.py
示例15: check_orbits
def check_orbits(p1, t1, p2, t2, tmn, tmx, tol):
    """Return True when two (period, epoch) ephemerides produce matching
    transit times within the window [tmn, tmx].

    Each ephemeris is expanded into its transit times inside the window;
    the pair matches when every time in the longer list has a partner in
    the other list closer than `tol`.
    """
    def _times(period, epoch):
        # All integer cycles whose transit time can land in the window.
        k0 = np.floor((tmn - epoch) / period)
        k1 = np.ceil((tmx - epoch) / period)
        t = epoch + period * np.arange(k0, k1)
        return t[(tmn <= t) * (t <= tmx)]

    times1 = _times(p1, t1)
    times2 = _times(p2, t2)
    gaps = np.fabs(times1[:, None] - times2[None, :])
    return max(len(times1), len(times2)) == np.sum(gaps < tol)
开发者ID:dfm,项目名称:ketu,代码行数:7,代码来源:feature_extract.py
示例16: plot_rand
def plot_rand(txyxidata, b,X, outfile):
    """ Plot stochastic forces and response of x

    Makes a three-panel figure (xi, eta, x versus t) with envelopes and
    saves it to `outfile`; skips the plot if the file already exists.
    NOTE(review): parameter `b` is unused in this function.
    """
    me = "LE_Plot.plot_rand: "
    if os.path.isfile(outfile): return me+"skip"  # don't overwrite an existing plot
    t0 = time.time()
    showplot = False
    t, x, eta, xi = txyxidata
    del txyxidata  # free the (potentially large) input tuple early
    tmax = np.ceil(t.max())
    ## Plot walk
    fs = 25  # axis-label font size
    winsize = int(tmax/80)  # envelope window length
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)
    fig.suptitle(outfile)#+"\n"+str(argv)[1:-1]
    envelope_plot(t, xi, winsize, ax=ax1)
    ax1.set_ylabel("$\\xi$",fontsize=fs)
    envelope_plot(t, eta, winsize, ax=ax2)
    ax2.set_ylabel("$\eta$",fontsize=fs)
    envelope_plot(t, x, winsize, ax=ax3)
    # Mark the +/-X thresholds on the response panel.
    ax3.plot([0,t.max()],[X,X],"k--"); ax3.plot([0,t.max()],[-X,-X],"k--")
    ax3.set_xlabel("$t$",fontsize=fs);ax3.set_ylabel("$x$",fontsize=fs)
    etalim = np.ceil(abs(eta).max()) ## Not perfect
    # NOTE(review): `etalim` is computed but never used.
    #fig.tight_layout()
    plt.savefig(outfile)
    # Python 2 print statements -- this module targets Python 2.
    print me+"Plot saved as",outfile
    print me+"Plotting random data:",round(time.time()-t0,1),"seconds"
    if showplot: plt.show()
    plt.close(fig)
    return
开发者ID:Allium,项目名称:ColouredNoise,代码行数:33,代码来源:LE_Plot.py
示例17: calc_slit_box_aps_1id
def calc_slit_box_aps_1id(slit_box_corners, inclip=(1, 10, 1, 10)):
    """
    Calculate the clip box based on given slit corners.

    Parameters
    ----------
    slit_box_corners : np.ndarray
        Four corners of the slit box as a 4x2 matrix
        (column 0 = row coordinate, column 1 = column coordinate).
    inclip : tuple, optional
        Extra inclipping to avoid clipping artifacts.

    Returns
    -------
    Tuple:
        Clipping indices as a tuple of four
        (clipFromTop, clipToBottom, clipFromLeft, clipToRight)
    """
    rows = slit_box_corners[:, 0]
    cols = slit_box_corners[:, 1]
    # Round the bounding box outward to whole pixels, then pull each edge
    # inward by the matching inclip amount.
    top = np.floor(rows.min()).astype(int) + inclip[0]
    bottom = np.ceil(rows.max()).astype(int) - inclip[1]
    left = np.floor(cols.min()).astype(int) + inclip[2]
    right = np.ceil(cols.max()).astype(int) - inclip[3]
    return (top, bottom, left, right)
开发者ID:tekinbicer,项目名称:tomopy,代码行数:28,代码来源:alignment.py
示例18: interp
def interp(pic, flow):
    """Backward-warp `pic` by `flow` using bilinear interpolation with
    periodic (wrap-around) boundary handling.

    Parameters
    ----------
    pic : np.ndarray
        Image of shape (H, W, 3).
    flow : np.ndarray
        Per-pixel displacement of shape (H, W, 2); channel 0 is the
        vertical (row) offset, channel 1 the horizontal (column) offset.

    Returns
    -------
    np.ndarray
        Warped image of the same shape as `pic` (float64).
    """
    H, W = pic.shape[0], pic.shape[1]
    # BUG FIX: use floor division for the row index -- plain `/` yields a
    # float array under Python 3, and float arrays cannot be used as
    # indices in `ans[ys, xs, :]` below.
    ys = np.arange(H * W) // W
    xs = np.arange(H * W) % W
    # Fractional source coordinates, wrapped periodically.
    ud = (flow[:, :, 0].reshape(-1) + ys) % H
    lr = (flow[:, :, 1].reshape(-1) + xs) % W
    # Integer neighbours above/below and left/right of each source point.
    u = np.int32(np.floor(ud))
    d = np.int32(np.ceil(ud)) % H
    udiffs = ud - u
    udiffs = np.dstack((udiffs, udiffs, udiffs))
    l = np.int32(np.floor(lr))
    r = np.int32(np.ceil(lr)) % W
    ldiffs = lr - l
    ldiffs = np.dstack((ldiffs, ldiffs, ldiffs))
    # Gather the four neighbours and blend vertically, then horizontally.
    ul = pic[u, l, :]
    ur = pic[u, r, :]
    dl = pic[d, l, :]
    dr = pic[d, r, :]
    udl = ul * (1 - udiffs) + dl * udiffs
    udr = ur * (1 - udiffs) + dr * udiffs
    ans = np.zeros(pic.shape)
    ans[ys, xs, :] = udl * (1 - ldiffs) + udr * ldiffs
    return ans
开发者ID:solomongarber,项目名称:texture_sampler,代码行数:26,代码来源:controller.py
示例19: get_spectral_magnitude
def get_spectral_magnitude(y_data,time_data, fs):
tStep = np.max(time_data)/len(time_data)
timeV = np.arange(0, np.max(time_data), tStep)
numsamp = 512 #timeDomainVectorLength(timeV)
if (len(y_data) < numsamp):
y_data = np.resize(y_data, (numsamp,))
window = hann(numsamp)
## setup the fft spectrum arrays
mag_spectrum = np.zeros([numsamp,int(np.ceil(float(len(timeV))/numsamp))])
#print 'time.len= %d, numsamp=%d, loop:%d' % (len(timeV), numsamp, int(np.ceil(float(len(timeV))/numsamp)))
for k in range(0,int(np.ceil(float(len(timeV))/numsamp))):
slice_dat = y_data[k*numsamp:numsamp*(k+1)]
if (len(slice_dat) < numsamp):
if (len(slice_dat) < numsamp/2): # WE DISCARDS LAST SLICE POINTS IF < NUMSAMP/2
break;
slice_dat = np.resize(slice_dat,(numsamp,))
#multiply it with the window and transform it into frequency domain
spectrum_dat = fft(slice_dat*window);
#get the spectrum mag @ each of the 256 frequency points and store it
#print 'k:',k,' spectrum_dat.len:',len(spectrum_dat)
mag_spectrum[:,k]= 20 * np.log10(abs(spectrum_dat))
mag_spectrum[:,k]= abs(spectrum_dat)
#print "fs= %.4g, NFFT= %ld, y_data.shape= %d, mag_spectrum= %dx%d" % (fs, numsamp,np.shape(y_data)[0], np.shape(mag_spectrum)[0],np.shape(mag_spectrum)[1])
## DOUBLE CHECK THE SIZE OF THE MATRIX
avg_fft_foreach = np.mean(mag_spectrum, axis=1)
# print "np.shape(avg_fft_foreach):", np.shape(avg_fft_foreach)
return avg_fft_foreach
开发者ID:abitofalchemy,项目名称:Scriptus,代码行数:28,代码来源:tc_comp_fft.py
示例20: _search_fine
def _search_fine(sino, srad, step, init_cen, ratio, drop):
    """
    Fine search for finding the rotation center.

    Parameters
    ----------
    sino : np.ndarray
        2-D sinogram (rows are projection angles).
    srad : float
        Search radius around `init_cen`, in pixels.
    step : float
        Search step size, in pixels.
    init_cen : float
        Initial center estimate from the coarse search.
    ratio : float
        Ratio used to build the Fourier-domain mask via `_create_mask`.
    drop : int
        Rows dropped from the mask.

    Returns
    -------
    float
        Refined rotation-center estimate.
    """
    Nrow, Ncol = sino.shape
    centerfliplr = (Ncol + 1.0) / 2.0 - 1.0
    # Shift the flipped sinogram by twice the offset so it overlays the
    # raw CoR (flipping mirrors the offset).
    shiftsino = np.int16(2 * (init_cen - centerfliplr))
    _copy_sino = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
    lefttake = 0
    righttake = Ncol - 1
    # Restrict the column range so every tested shift stays in-bounds.
    # BUG FIX: cast to int -- np.ceil/np.floor return floats, and float
    # slice indices raise TypeError under modern NumPy.
    if init_cen <= centerfliplr:
        lefttake = int(np.ceil(srad + 1))
        righttake = int(np.floor(2 * init_cen - srad - 1))
    else:
        lefttake = int(np.ceil(
            init_cen - (Ncol - 1 - init_cen) + srad + 1))
        righttake = int(np.floor(Ncol - 1 - srad - 1))
    Ncol1 = righttake - lefttake + 1
    mask = _create_mask(2 * Nrow - 1, Ncol1, 0.5 * ratio * Ncol, drop)
    numshift = np.int16((2 * srad + 1.0) / step)
    listshift = np.linspace(-srad, srad, num=numshift)
    listmetric = np.zeros(len(listshift), dtype='float32')
    num1 = 0
    for i in listshift:
        # BUG FIX: `scipy.ndimage.interpolation` was removed in SciPy
        # 1.10 -- call `ndimage.shift` directly (long-standing alias).
        _sino = ndimage.shift(
            _copy_sino, (0, i), prefilter=False)
        sinojoin = np.vstack((sino, _sino))
        # Metric: masked FFT magnitude of the joined sinogram; minimal
        # when the shifted flipped half lines up with the raw half.
        listmetric[num1] = np.sum(np.abs(np.fft.fftshift(
            pyfftw.interfaces.numpy_fft.fft2(
                sinojoin[:, lefttake:righttake + 1]))) * mask)
        num1 = num1 + 1
    minpos = np.argmin(listmetric)
    # Half the best shift, because flipping doubled the offset.
    return init_cen + listshift[minpos] / 2.0
开发者ID:JStuckner,项目名称:tomopy,代码行数:35,代码来源:rotation.py
注:本文中的numpy.ceil函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论