This article collects typical usage examples of the numpy.nanmedian function in Python. If you have been wondering what nanmedian does, how to call it, or what it looks like in practice, the curated examples below should help. Twenty code examples of the nanmedian function are shown, ordered by popularity.
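Before the examples, here is a minimal sketch of the function itself (the array values are made up for illustration): numpy.nanmedian computes the median while ignoring NaN entries, whereas plain numpy.median returns NaN as soon as one is present.

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])
print(np.median(a))             # nan: the plain median propagates NaNs
print(np.nanmedian(a))          # 3.5: NaNs are ignored
print(np.nanmedian(a, axis=1))  # [2.  4.5]: per-row medians, NaNs ignored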
Example 1: plot_divergences
def plot_divergences(Ns, Ks, ivars):
    divs = np.array([divergence(ivar, Truth) for ivar in ivars])
    small = (Ns * Ks) < 300
    med = ((Ns * Ks) > 3000) * ((Ns * Ks) < 5000)
    big = (Ns * Ks) > 60000
    Ksteps = 2. ** np.arange(0, 9)
    mediansmalldivs = np.array([_hoggmedian((divs[small])[np.isclose(Ks[small], Kstep)]) for Kstep in Ksteps])
    medianmeddivs = np.array([_hoggmedian((divs[med])[np.isclose(Ks[med], Kstep)]) for Kstep in Ksteps])
    medianbigdivs = np.array([_hoggmedian((divs[big])[np.isclose(Ks[big], Kstep)]) for Kstep in Ksteps])
    plt.clf()
    plt.axhline(np.median(divs[small]), color="k", alpha=0.25)
    plt.axhline(np.median(divs[med]), color="k", alpha=0.25)
    plt.axhline(np.median(divs[big]), color="k", alpha=0.25)
    plt.plot(Ks[small], divs[small], "k_", ms=6, alpha=0.5)
    plt.plot(Ks[med], divs[med], "k_", ms=12, alpha=0.5)
    plt.plot(Ks[big], divs[big], "k_", ms=18, alpha=0.5)
    good = np.isfinite(mediansmalldivs)
    plt.plot(Ksteps[good], mediansmalldivs[good], "k_", ms=6, mew=4)
    plt.plot(Ksteps, medianmeddivs, "k_", ms=12, mew=4)
    plt.plot(Ksteps, medianbigdivs, "k_", ms=18, mew=4)
    plt.loglog()
    plt.xlim(np.min(Ks) / 1.5, np.max(Ks) * 1.5)
    plt.ylim(np.nanmedian(divs[big]) / 30., np.nanmedian(divs[small]) * 30.)
    plt.xlabel("number of photons per image $K$")
    plt.ylabel("divergence from the Truth")
    hogg_savefig("divergences.png")
    return None
Developer: davidwhogg, Project: DiffractionMicroscopy, Lines: 27, Source: plot_gaussian_results.py
Example 2: subtract
def subtract(args):
    for im_name in args.input1:
        if args.overwrite:
            new_name = im_name
        elif args.suffix:
            new_name = utilities.add_suffix_prefix(im_name, suffix=args.suffix)
        # (args is expected to set either overwrite or suffix, so new_name is always defined)
        # Read image, separate data and header
        im = fits.open(im_name)
        data = im[0].data
        hdr = im[0].header
        # Extract the overscan region. Notice that what we call x, y are the second and first axes
        y0, y1, x0, x1 = args.region
        overscan = data.copy()[x0:x1, y0:y1]
        # Collapse along the short axis with a NaN-robust median
        if overscan.shape[0] < overscan.shape[1]:
            average = numpy.nanmedian(overscan, axis=0)
            # Fit a polynomial and return the fitted values
            fitted_overscan = fit_pol(average, args.deg)
            data[:, y0:y1] -= fitted_overscan
        else:
            average = numpy.nanmedian(overscan, axis=1)
            # Fit a polynomial and return the fitted values
            fitted_overscan = fit_pol(average, args.deg)
            data[x0:x1, :] = (data[x0:x1, :].T - fitted_overscan).T
        # Write to the output file
        hdr.add_comment("Overscan region subtracted. Region: [{0}:{1},{2}:{3}]".format(x0, x1, y0, y1))
        fits.writeto(new_name, data, hdr, clobber=True)
    return None
Developer: javierblasco, Project: repipy, Lines: 35, Source: subtract_overscan.py
Example 3: calc_norm_summary_tables
def calc_norm_summary_tables(accuracy_tbl, time_tbl):
    """
    Calculate normalized performance/ranking summary, as numpy
    matrices as usual for convenience, and matrices of additional
    statistics (min, max, percentiles, etc.)

    Here normalized means relative to the best, which gets a 1; all
    others get the ratio resulting from dividing by the performance of
    the best.
    """
    # Min across all minimizers, i.e. for each fit problem what is the lowest chi-squared and the lowest time
    min_sum_err_sq = np.nanmin(accuracy_tbl, 1)
    min_runtime = np.nanmin(time_tbl, 1)
    # Create normalized tables
    norm_acc_rankings = accuracy_tbl / min_sum_err_sq[:, None]
    norm_runtimes = time_tbl / min_runtime[:, None]
    summary_cells_acc = np.array([np.nanmin(norm_acc_rankings, 0),
                                  np.nanmax(norm_acc_rankings, 0),
                                  nanmean(norm_acc_rankings, 0),
                                  nanmedian(norm_acc_rankings, 0)
                                  ])
    summary_cells_runtime = np.array([np.nanmin(norm_runtimes, 0),
                                      np.nanmax(norm_runtimes, 0),
                                      nanmean(norm_runtimes, 0),
                                      nanmedian(norm_runtimes, 0)
                                      ])
    return norm_acc_rankings, norm_runtimes, summary_cells_acc, summary_cells_runtime
Developer: DanNixon, Project: mantid, Lines: 31, Source: post_processing.py
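The normalization idea above is easy to check on a toy table. A minimal sketch with made-up numbers, using numpy's nan-aware reductions directly rather than the bare nanmean/nanmedian imports in the example:

import numpy as np

# Rows: fit problems; columns: minimizers. NaN marks a failed run.
accuracy_tbl = np.array([[2.0, 1.0, 4.0],
                         [3.0, np.nan, 1.5]])
best = np.nanmin(accuracy_tbl, axis=1)    # lowest chi-squared per problem
norm = accuracy_tbl / best[:, None]       # the best minimizer gets 1.0, others > 1.0
print(norm)                               # [[2. 1. 4.], [2. nan 1.]]
print(np.nanmedian(norm, axis=0))         # [2. 1. 2.5]: per-minimizer median, NaNs ignored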
Example 4: _make_tuples
def _make_tuples(self, key):
    # Get behavior filename
    behavior_path = (experiment.Session() & key).fetch1('behavior_path')
    local_path = lab.Paths().get_local_path(behavior_path)
    filename = (experiment.Scan.BehaviorFile() & key).fetch1('filename')
    full_filename = os.path.join(local_path, filename)
    # Read file
    data = h5.read_behavior_file(full_filename)
    # Get counter timestamps and convert to seconds
    ts = h5.ts2sec(data['ts'], is_packeted=True)
    # Read temperature (if available) and invalidate points with unreliable timestamps
    temp_raw = data.get('temperature', None)
    if temp_raw is None:
        raise PipelineException('Scan {animal_id}-{session}-{scan_idx} does not have '
                                'temperature data'.format(**key))
    temp_raw[np.isnan(ts)] = float('nan')
    # Read temperature and smooth it
    temp_celsius = (temp_raw * 100 - 32) / 1.8  # F to C
    sampling_rate = int(round(1 / np.nanmedian(np.diff(ts))))  # samples per second
    smooth_temp = signal.low_pass_filter(temp_celsius, sampling_rate, cutoff_freq=1,
                                         filter_size=2 * sampling_rate)
    # Resample at 1 Hz
    downsampled_ts = ts[::sampling_rate]
    downsampled_temp = smooth_temp[::sampling_rate]
    # Insert
    self.insert1({**key, 'temp_time': downsampled_ts,
                  'temperatures': downsampled_temp,
                  'median_temperature': np.nanmedian(downsampled_temp)})
    self.notify(key)
Developer: dimitri-yatsenko, Project: pipeline, Lines: 35, Source: temperature.py
Example 5: find_bounds
def find_bounds(model):
    """
    Return the median upper and lower bound of the metabolic model.

    Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000), but
    this may not be the case for merged or autogenerated models. In these
    cases, this function is used to iterate over all the bounds of all the
    reactions and find the median bound values in the model, which are
    then used as the 'most common' bounds.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    """
    lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions],
                              dtype=float)
    upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions],
                              dtype=float)
    lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0])
    upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0])
    if np.isnan(lower_bound):
        LOGGER.warning("Could not identify a median lower bound.")
        lower_bound = -1000.0
    if np.isnan(upper_bound):
        LOGGER.warning("Could not identify a median upper bound.")
        upper_bound = 1000.0
    return lower_bound, upper_bound
Developer: biosustain, Project: memote, Lines: 29, Source: helpers.py
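The core trick here (take the median over only the nonzero bounds, and rely on nanmedian of an empty selection returning NaN to trigger the fallback) can be seen without cobra at all. A minimal sketch with made-up bound values:

import numpy as np

lower_bounds = np.array([-1000.0, 0.0, -1000.0, -50.0])
nonzero = lower_bounds[lower_bounds != 0.0]
print(np.nanmedian(nonzero))       # -1000.0, the 'most common' lower bound
print(np.nanmedian(np.array([])))  # nan (with a RuntimeWarning): the fallback case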
Example 6: flaremeter
def flaremeter(data):
    ''' Obtain median of data across baselines, polarizations, and frequencies to create a
        time series indicating whether a flare has occurred. Values returned will be close
        to unity if no flare. Returns:
            tlevel: Array of levels at each time, nominally near unity
            bflag:  Array of flags indicating nominal background (where True) or
                    elevated background (where False) indicating possible flare
    '''
    nbl, npol, nf, nt = data.shape
    tlevel = np.zeros(nt, 'float')
    background = np.sqrt(np.abs(data[:, 0, :, :])**2 + np.abs(data[:, 1, :, :])**2)
    init_bg = np.nanmedian(background, 2)  # Initially take background as median over entire time range
    bflag = np.ones(nt, 'bool')  # flags indicating "good" background times (not in flare)
    for i in range(nt):
        good, = np.where(bflag[:i] == True)  # List of indexes of good background times up to current time
        ngood = len(good)  # Truncate list of indexes to last 100 elements (or fewer)
        if ngood > 100:
            good = good[ngood - 100:]
            # Calculate median over good background times
            bg = np.nanmedian(background[:, :, good], 2)
        else:
            # If there haven't been 100 times with good backgrounds yet, just use the initial one.
            # This is supposed to avoid startup transients.
            bg = init_bg
        # Generate levels for each baseline and frequency for this time
        level = np.sqrt(abs(data[:, 0, :, i])**2 + abs(data[:, 1, :, i])**2) / bg
        # Take median over baseline and frequency to give a single number for this time
        tlevel[i] = np.nanmedian(level)
        if tlevel[i] > 1.05:
            # If the level of the current time is higher than 1.05, do not include this time in future backgrounds
            bflag[i] = False
    return tlevel, bflag
Developer: binchensolar, Project: eovsa, Lines: 32, Source: flare_monitor.py
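The background-tracking idea (normalize each sample by a median over recent samples flagged as quiet, and flag samples exceeding a threshold so they never contaminate later backgrounds) works the same way in one dimension. A minimal sketch on synthetic data, reusing the 1.05 threshold from the example above:

import numpy as np

rng = np.random.default_rng(0)
x = np.ones(200) + 0.01 * rng.standard_normal(200)
x[120:140] += 0.5                  # injected "flare"

tlevel = np.zeros(x.size)
bflag = np.ones(x.size, bool)
init_bg = np.nanmedian(x)
for i in range(x.size):
    good, = np.where(bflag[:i])
    bg = np.nanmedian(x[good[-100:]]) if good.size > 100 else init_bg
    tlevel[i] = x[i] / bg
    if tlevel[i] > 1.05:
        bflag[i] = False           # exclude flare times from future backgrounds

print(bflag[120:140].any())        # False: every sample in the flare interval is flagged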
Example 7: sigmaclip
def sigmaclip(data, factor, replacement=None, median=False, maxiter=100):
    # Iteratively replace values more than `factor` standard deviations from
    # the center (mean or median) until nothing is clipped or maxiter is hit.
    std = np.nanstd(data)  # nan-aware, since clipped values may be replaced by NaN
    iteration = 0
    if median:
        center = np.nanmedian(data)
    else:
        center = np.nanmean(data)
    if not replacement:
        replacement = np.nan
    elif replacement == 'mean':
        replacement = center
    indx = (data > (center + std * factor)) + (data < (center - std * factor))
    while np.sum(indx) > 0 and iteration < maxiter:
        data[indx] = replacement
        std = np.nanstd(data)
        if median:
            center = np.nanmedian(data)
        else:
            center = np.nanmean(data)
        indx = (data > (center + std * factor)) + (data < (center - std * factor))
        iteration += 1
    return data
Developer: fedhere, Project: detect120, Lines: 26, Source: detect120Utils.py
Example 8: make_NRCS_image
def make_NRCS_image(nobj, bandname, fn='', dir='.', max=np.nan, min=np.nan,
                    **kwargs):
    if not fn:
        if 'reduced' in bandname:
            fn = bandname[:9] + '.png'
        else:
            fn = bandname + '.png'
    resize(nobj)
    try:
        s0 = 10.0 * np.log10(nobj[bandname])
    except:
        nobj.undo()
        raise
    s0[np.where(np.isinf(s0))] = np.nan
    caption = 'dB'
    if np.isnan(min):
        min = np.nanmedian(s0, axis=None) - 2.0 * np.nanstd(s0, axis=None)
    if np.isnan(max):
        max = np.nanmedian(s0, axis=None) + 2.0 * np.nanstd(s0, axis=None)
    nansatFigure(s0, min, max, dir, fn)
    nobj.undo()
    return fn
Developer: nansencenter, Project: nansen-cloud, Lines: 25, Source: tools.py
Example 9: cutoff
def cutoff(self, df, z_score=3.0):
    """
    Cut off extreme values using Median Absolute Deviation.

    Parameters
    ----------
    df : pd.DataFrame

    Returns
    -------
    pd.DataFrame
    """
    df = self._align_univariate(df)
    df = self._mask_non_index_member(df)
    axis = 1
    x = df.values
    median = np.nanmedian(x, axis=axis).reshape(-1, 1)
    diff = x - median
    diff_abs = np.abs(diff)
    mad = np.nanmedian(diff_abs, axis=axis).reshape(-1, 1)
    mask = diff_abs > z_score * mad
    x[mask] = 0
    x = x + z_score * mad * np.sign(diff * mask) + mask * median
    return pd.DataFrame(index=df.index, columns=df.columns, data=x)
Developer: sukeyisme, Project: JAQS, Lines: 29, Source: py_expression_eval.py
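The same MAD-based winsorization can be demonstrated standalone. A minimal sketch on a made-up array: values more than z_score MADs from the row median are pulled back to median ± z_score·MAD. Here np.clip is an equivalent, more direct formulation of the masked arithmetic above:

import numpy as np

x = np.array([[1.0, 2.0, 3.0, 100.0]])                      # 100.0 is the outlier
z = 3.0
med = np.nanmedian(x, axis=1).reshape(-1, 1)                # row median: 2.5
mad = np.nanmedian(np.abs(x - med), axis=1).reshape(-1, 1)  # MAD: 1.0
clipped = np.clip(x, med - z * mad, med + z * mad)
print(clipped)                                              # [[1. 2. 3. 5.5]]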
Example 10: test_multiFringes
def test_multiFringes(self):
    """Test that multi-fringe results are handled correctly by the task.
    """
    self.config.filters.append("_unknown_")
    self.config.large = 16
    task = FringeTask(name="multiFringeMock", config=self.config)
    config = isrMock.IsrMockConfig()
    config.fringeScale = [750.0, 240.0, 220.0]
    config.fringeX0 = [100.0, 150.0, 200.0]
    config.fringeY0 = [0.0, 200.0, 0.0]
    dataRef = isrMock.FringeDataRefMock(config=config)
    exp = dataRef.get("raw")
    medianBefore = np.nanmedian(exp.getImage().getArray())
    fringes = task.readFringes(dataRef, assembler=None)
    solution, rms = task.run(exp, **fringes.getDict())
    medianAfter = np.nanmedian(exp.getImage().getArray())
    stdAfter = np.nanstd(exp.getImage().getArray())
    self.assertLess(medianAfter, medianBefore)
    self.assertFloatsAlmostEqual(medianAfter, 3002.233, atol=1e-4)
    self.assertFloatsAlmostEqual(stdAfter, 3549.9375, atol=1e-4)
    deviation = np.abs(solution - config.fringeScale)
    self.assertTrue(np.all(deviation / rms < 1.0))
Developer: lsst, Project: ip_isr, Lines: 27, Source: test_fringes.py
Example 11: triangleMAPs
def triangleMAPs(savefilename, basename):
    with open(savefilename, 'rb') as savefile:
        bf = numpy.array(pickle.load(savefile))
        samples = numpy.array(pickle.load(savefile))
        bf_g15 = numpy.array(pickle.load(savefile))
        samples_g15 = numpy.array(pickle.load(savefile))
        bf_zero = numpy.array(pickle.load(savefile))
        samples_zero = numpy.array(pickle.load(savefile))
    labels = []
    for jj in range(samples.shape[2]):
        labels.append(r"$\mathrm{param}\ %i$" % jj)
    maps = define_rcsample.MAPs()
    for ii, map in enumerate(maps.map()):
        if ii >= len(bf):
            break
        tfeh = numpy.nanmedian(map['FE_H'])
        tafe = numpy.nanmedian(map[define_rcsample._AFETAG])
        for tbf, tsamples, ext in zip([bf, bf_g15, bf_zero],
                                      [samples, samples_g15, samples_zero],
                                      ['fid', 'g15', 'zero']):
            try:
                triangle.corner(tsamples[ii].T, quantiles=[0.16, 0.5, 0.84],
                                labels=labels,
                                show_titles=True, title_args={"fontsize": 12},
                                bins=21)
            except ValueError:
                pass
            else:
                bovy_plot.bovy_text(r'$[\mathrm{{Fe/H}}] = {feh:.1f},$'
                                    .format(feh=tfeh) + '\n'
                                    + r'$[\alpha/\mathrm{{Fe}}] = {afe:.2f}$'
                                    .format(afe=tafe),
                                    top_left=True, size=16.)
                bovy_plot.bovy_end_print(basename + "_%i_%s.png" % (ii, ext))
    return None
Developer: NatalieP-J, Project: apogee-maps, Lines: 33, Source: triangleMAPs.py
Example 12: collapse_cube
def collapse_cube(w1, w2):
    """ Collapse a MUSE data cube into a 2D image and a noise estimate.

    Arguments
        w1, w2 : Initial and final wavelengths defining the slice file
                 "slice_w{w1}_{w2}.fits", which contains both data and
                 stat extensions.
    """
    fits = "slice_w{0}_{1}.fits".format(w1, w2)
    outfits = "collapsed_w{0}_{1}.fits".format(w1, w2)
    data = pf.getdata(fits, 0)
    error = pf.getdata(fits, 1)
    h = pf.getheader(fits, 0)
    h2 = pf.getheader(fits, 1)
    h["NAXIS"] = 2
    del h["NAXIS3"]
    h2["NAXIS"] = 2
    del h2["NAXIS3"]
    print("Starting collapsing process...")
    start = time.time()
    w = wavelength_array(fits)
    # newdata = np.trapz(data, dx=np.diff(w)[0], axis=0)
    # newdata = np.nansum(data, axis=0) * np.diff(w)[0]
    newdata = np.nanmedian(data, axis=0)
    noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(2. * data -
                                                         np.roll(data, 2, axis=0) -
                                                         np.roll(data, -2, axis=0)),
                                                  axis=0)
    end = time.time()
    print("Collapsing lasted {0} minutes.".format((end - start) / 60.))
    hdu = pf.PrimaryHDU(newdata, h)
    hdu2 = pf.ImageHDU(noise, h2)
    hdulist = pf.HDUList([hdu, hdu2])
    hdulist.writeto(outfits, clobber=True)
    return
Developer: kadubarbosa, Project: hydramuse, Lines: 35, Source: misc.py
Example 13: Scatter
def Scatter(y, win=13, remove_outliers=False):
    '''
    Return the scatter in ppm based on the median running standard deviation
    for a window size of :py:obj:`win` = 13 cadences (for K2, this
    is ~6.5 hours, as in VJ14).

    :param ndarray y: The array whose CDPP is to be computed
    :param int win: The window size in cadences. Default `13`
    :param bool remove_outliers: Clip outliers at 5 sigma before computing \
           the CDPP? Default `False`
    '''
    if remove_outliers:
        # Remove 5-sigma outliers from data
        # smoothed on a 1 day timescale
        if len(y) >= 50:
            ys = y - Smooth(y, 50)
        else:
            ys = y
        M = np.nanmedian(ys)
        MAD = 1.4826 * np.nanmedian(np.abs(ys - M))
        out = []
        for i, _ in enumerate(y):
            if (ys[i] > M + 5 * MAD) or (ys[i] < M - 5 * MAD):
                out.append(i)
        out = np.array(out, dtype=int)
        y = np.delete(y, out)
    if len(y):
        return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win)
                                    for yi in Chunks(y, win, all=True)])
    else:
        return np.nan
Developer: rodluger, Project: everest, Lines: 33, Source: mathutils.py
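The metric itself is easy to reproduce without the everest helpers (Smooth, Chunks): split the series into windows, take each window's standard error of the mean, and report the median in ppm. A minimal sketch on synthetic data, with a plain list comprehension as a hypothetical stand-in for the Chunks helper:

import numpy as np

rng = np.random.default_rng(1)
y = 1.0 + 500e-6 * rng.standard_normal(1000)  # flux with ~500 ppm point-to-point noise

win = 13
chunks = [y[i:i + win] for i in range(0, len(y), win)]
scatter_ppm = 1.e6 * np.nanmedian([np.std(c) / np.sqrt(win) for c in chunks])
print(scatter_ppm)                            # ~140 ppm (about 500 / sqrt(13))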
Example 14: savePredictors
def savePredictors(indata):
    ''' Compute velocity quantiles for each inter-saccade event.
    '''
    import numpy as np
    data, coderid, gazeLag = indata
    try:
        preds = []
        coords = []
        data.extractBasicEvents()
        data.driftCorrection()
        if coderid != -1:
            importFailed = data.importComplexEvents(coderid=coderid)
            if importFailed:
                return [[data.vp, -1]]
        g = data.getGaze()
        totdur = float(g.shape[0])
        hz = 100  # Hz of the coordinate output
        padstart = 20  # insert NaNs at start to allow lag
        if gazeLag >= 0:
            gg = data.getGaze(hz=hz)[:, np.newaxis, [7, 8]]
            tr = data.getTraj(hz=hz)
            mnn = min(gg.shape[0], tr.shape[0])
            tt = data.getGaze(hz=hz)[:mnn, 0]
            gg = gg[:mnn, :, :]
            tr = tr[:mnn, :, :]
        vel = data.getVelocity()
        for si in range(len(data.sev)):
            if si + 2 < len(data.sev):
                e = data.sev[si + 1][0]
            else:
                e = -1
            s = data.sev[si][1]
            d = e - s
            if g[s, 0] - gazeLag < 0 and g[e, 0] - gazeLag <= 0:
                continue
            preds.append([data.vp, data.block, data.trial,
                          data.sev[si][-1], si, s, d, s / totdur, d / totdur])
            if gazeLag >= 0:
                sel1 = np.logical_and(tt > (g[s, 0]), tt < (g[e, 0]))
                sel2 = np.logical_and(tt > (g[s, 0] - gazeLag), tt < (g[e, 0] - gazeLag))
                trt = tr[sel2, :, :]
                trt = trt[:min(sel2.sum(), sel1.sum()), :, :]
                temp = np.ones((sel1.sum(), 14, 2)) * np.nan
                temp[-sel2.sum():, :, :] = trt
                coords.append(np.concatenate([gg[sel1, :, :], temp], axis=1))
            tps = [s, s + d / 4., s + d / 2., s + 3 * d / 4., e]
            tps = np.int32(np.round(tps))
            for ti in range(len(tps) - 1):
                preds[-1].append(np.nanmedian(vel[tps[ti]:tps[ti + 1]]))
                dist = np.nanmedian(data.dist[tps[ti]:tps[ti + 1], :], 0)
                di = np.argsort(dist)[:4]  # take the four nearest agents
                preds[-1].extend(dist[di])
                dev = np.abs(data.dev[tps[ti]:tps[ti + 1], :])
                dev = np.nanmedian(dev[:, di], 0)
                preds[-1].extend(dev)
        if len(preds):
            if gazeLag >= 0:
                return [preds, coords]
            else:
                return [preds, []]
        else:
            return [[[data.vp, -1]], []]
    except:
        print('Error at vp %d b %d t %d' % (data.vp, data.block, data.trial))
        raise
Developer: simkovic, Project: Chase, Lines: 59, Source: Preprocess.py
Example 15: plot_lc_white
def plot_lc_white(self, ax=None):
    ax.plot(self.time, self.flux_r - self.trposi + np.nanmedian(self.trposi)
            - self.trtime + np.nanmedian(self.trtime), '.')
    [ax.axvline(self.bls.tc + i * self._rbls['bls_period'], alpha=0.25, ls='--', lw=1)
     for i in range(35)]
    setp(ax, xlim=self.time[[0, -1]], xlabel='Time', ylabel='Normalised flux')
Developer: hpparvi, Project: k2ps, Lines: 8, Source: psearch.py
Example 16: values_to_sizes
def values_to_sizes(values, size, size_range):
    maxval, minval = np.nanmax(values), np.nanmin(values)
    try:
        medval = np.nanmedian(values)
    except KeyError:
        medval = np.nanmedian(values.values)
    sizes = (values - medval) / (maxval - medval) * size_range + size
    return sizes
Developer: vlas-sokolov, Project: pyscatter-3d, Lines: 8, Source: pyscatter3d.py
Example 17: SysRem
def SysRem(time, flux, err, ncbv=5, niter=50, sv_win=999,
           sv_order=3, **kwargs):
    '''
    Applies :py:obj:`SysRem` to a given set of light curves.

    :param array_like time: The time array for all of the light curves
    :param array_like flux: A 2D array of the fluxes for each of the light \
           curves, shape `(nfluxes, ntime)`
    :param array_like err: A 2D array of the flux errors for each of the \
           light curves, shape `(nfluxes, ntime)`
    :param int ncbv: The number of signals to recover. Default 5
    :param int niter: The number of :py:obj:`SysRem` iterations to perform. \
           Default 50
    :param int sv_win: The Savitzky-Golay filter window size. Default 999
    :param int sv_order: The Savitzky-Golay filter order. Default 3
    '''
    nflx, tlen = flux.shape
    # Get normalized fluxes
    med = np.nanmedian(flux, axis=1).reshape(-1, 1)
    y = flux - med
    # Compute the inverse of the variances
    invvar = 1. / err ** 2
    # The CBVs for this set of fluxes
    cbvs = np.zeros((ncbv, tlen))
    # Recover `ncbv` components
    for n in range(ncbv):
        # Initialize the weights and regressors
        c = np.zeros(nflx)
        a = np.ones(tlen)
        f = y * invvar
        # Perform `niter` iterations
        for i in range(niter):
            # Compute the `c` vector (the weights)
            c = np.dot(f, a) / np.dot(invvar, a ** 2)
            # Compute the `a` vector (the regressors)
            a = np.dot(c, f) / np.dot(c ** 2, invvar)
        # Remove this component from all light curves
        y -= np.outer(c, a)
        # Save this regressor after smoothing it a bit
        if sv_win >= len(a):
            sv_win = len(a) - 1
        if sv_win % 2 == 0:
            sv_win -= 1
        cbvs[n] = savgol_filter(a - np.nanmedian(a), sv_win, sv_order)
    return cbvs
Developer: rodluger, Project: everest, Lines: 58, Source: sysrem.py
Example 18: get_bb_ratio
def get_bb_ratio(bb_height, bb_width, quality, zp_r):
    """Returns the Bright Band ratio of each SR bin.

    With *SR*, we refer to precipitation radars based on space-borne platforms
    such as TRMM or GPM.

    This function basically applies the Bright Band (BB) information as
    provided by the corresponding SR datasets per beam, namely BB height and
    width, as well as quality flags of the SR beams. A BB ratio of <= 0
    indicates that a bin is located below the melting layer (ML), >= 1
    above the ML, and in between 0 and 1 inside the ML.

    Parameters
    ----------
    bb_height : :class:`numpy:numpy.ndarray`
        Array of shape (nscans, nbeams) containing the SR beams' BB heights
        in meters.
    bb_width : :class:`numpy:numpy.ndarray`
        Array of shape (nscans, nbeams) containing the SR beams' BB widths
        in meters.
    quality : :class:`numpy:numpy.ndarray`
        Array of shape (nscans, nbeams) containing the SR beams' BB quality
        index.
    zp_r : :class:`numpy:numpy.ndarray`
        Array of SR bin altitudes of shape (nscans, nbeams, nbins).

    Returns
    -------
    ratio : :class:`numpy:numpy.ndarray`
        Array of shape (nscans, nbeams, nbins) containing the BB ratio of
        every SR bin.
        - ratio <= 0: below ML
        - 0 < ratio < 1: inside ML
        - 1 <= ratio: above ML
    ibb : :class:`numpy:numpy.ndarray`
        Boolean array containing the indices of SR bins connected to the BB.
    """
    # Criteria for BB detection
    ibb = (bb_height > 0) & (bb_width > 0) & (quality == 1)
    # Set non-BB pixels to np.nan
    bb_height = bb_height.copy()
    bb_height[~ibb] = np.nan
    bb_width = bb_width.copy()
    bb_width[~ibb] = np.nan
    # Get median of BB pixels
    bb_height_m = np.nanmedian(bb_height)
    bb_width_m = np.nanmedian(bb_width)
    # Approximate the melting layer top and bottom
    zmlt = bb_height_m + bb_width_m / 2.
    zmlb = bb_height_m - bb_width_m / 2.
    # Get the ratio relative to the BB height
    ratio = (zp_r - zmlb) / (zmlt - zmlb)
    return ratio, ibb
Developer: heistermann, Project: wradlib, Lines: 58, Source: qual.py
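A tiny numeric sketch (made-up heights) shows how the ratio behaves: with a median BB height of 3000 m and width of 1000 m, the melting layer spans 2500 to 3500 m, and bin altitudes map linearly onto the ratio:

import numpy as np

bb_height_m, bb_width_m = 3000.0, 1000.0  # median BB height/width in meters
zmlt = bb_height_m + bb_width_m / 2.      # ML top: 3500 m
zmlb = bb_height_m - bb_width_m / 2.      # ML bottom: 2500 m

zp_r = np.array([1000.0, 2500.0, 3000.0, 3500.0, 6000.0])
ratio = (zp_r - zmlb) / (zmlt - zmlb)
print(ratio)                              # [-1.5  0.   0.5  1.   3.5]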
Example 19: snr
def snr(flux, axis=0):
    """ Calculates the S/N ratio of a spectrum.
    Translated from the IDL routine der_snr.pro """
    signal = np.nanmedian(flux, axis=axis)
    noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(2. * flux -
                                                         np.roll(flux, 2, axis=axis) -
                                                         np.roll(flux, -2, axis=axis)),
                                                  axis=axis)
    return signal, noise, signal / noise
Developer: kadubarbosa, Project: hydramuse, Lines: 9, Source: misc.py
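A quick check on synthetic data (made-up flux with a known noise level) shows the estimator recovering the injected S/N. The 1.482602 factor converts a median absolute deviation into a Gaussian sigma, and sqrt(6) accounts for the variance of the second-difference combination 2*f[i] - f[i-2] - f[i+2]:

import numpy as np

rng = np.random.default_rng(2)
true_signal, true_noise = 100.0, 2.0
flux = true_signal + true_noise * rng.standard_normal(10000)

signal = np.nanmedian(flux)
noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(2. * flux -
                                                     np.roll(flux, 2) -
                                                     np.roll(flux, -2)))
print(signal, noise)  # ~100 and ~2: close to the injected values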
Example 20: draw
def draw(self):
    # return np.fmax(self.ads.draw(), self.cds.draw())
    adens = self.ads.draw()
    cdens = self.cds.draw()
    dens = cdens.copy()
    for ii in range(dens.shape[0]):
        if np.nanmedian(adens[ii]) > np.nanmedian(cdens[ii]):
            dens[ii] = adens[ii]
    return dens
Developer: autocorr, Project: besl, Lines: 9, Source: dpdf_mc.py
Note: The numpy.nanmedian examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are governed by each project's License. Do not repost without permission.