This article collects typical usage examples of Python's scipy.logical_and function. If you have been wondering what exactly logical_and does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following shows 20 code examples of the logical_and function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code samples.
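Before the examples, here is a minimal, self-contained sketch of what logical_and does. Note that scipy.logical_and was a re-export of numpy.logical_and in the older SciPy releases these projects use; current SciPy has removed such re-exports, so the sketch imports numpy directly:

import numpy as np

x = np.array([1.0, 2.5, 4.0, 7.5, 9.0])
# Element-wise AND of two boolean arrays: keep values in the half-open window [2, 8).
mask = np.logical_and(x >= 2.0, x < 8.0)
print(mask)     # [False  True  True  True False]
print(x[mask])  # [2.5  4.   7.5]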
Example 1: coordreduce
def coordreduce(self,coorddict):
    assert type(coorddict)==dict, "Coorddict needs to be a dictionary"
    ncoords = self.Cart_Coords.shape[0]
    coordlist = ['x','y','z','r','theta','phi']
    coordkeysorg = coorddict.keys()
    coordkeys = [ic for ic in coordkeysorg if ic in coordlist]
    ckeep = sp.ones(ncoords,dtype=bool)
    for ic in coordkeys:
        currlims = coorddict[ic]
        if ic=='x':
            tempcoords = self.Cart_Coords[:,0]
        elif ic=='y':
            tempcoords = self.Cart_Coords[:,1]
        elif ic=='z':
            tempcoords = self.Cart_Coords[:,2]
        elif ic=='r':
            tempcoords = self.Sphere_Coords[:,0]
        elif ic=='theta':
            tempcoords = self.Sphere_Coords[:,1]
        elif ic=='phi':
            tempcoords = self.Sphere_Coords[:,2]
        keeptemp = sp.logical_and(tempcoords>=currlims[0],tempcoords<currlims[1])
        ckeep = sp.logical_and(ckeep,keeptemp)
    # prune the arrays
    self.Cart_Coords = self.Cart_Coords[ckeep]
    self.Sphere_Coords = self.Sphere_Coords[ckeep]
    self.Param_List = self.Param_List[ckeep]
    self.Velocity = self.Velocity[ckeep]
Author: hhuangmeso | Project: RadarDataSim | Lines: 31 | Source: IonoContainer.py
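The pattern in coordreduce — start from an all-True mask and AND in one half-open range per coordinate — generalizes to any set of per-column limits. A minimal sketch with hypothetical data and limits (not from the source project):

from functools import reduce
import numpy as np

coords = np.random.rand(100, 3)        # rows are points; columns are x, y, z
lims = {0: (0.2, 0.8), 2: (0.0, 0.5)}  # hypothetical: column index -> (low, high)
masks = [np.logical_and(coords[:, c] >= lo, coords[:, c] < hi)
         for c, (lo, hi) in lims.items()]
keep = reduce(np.logical_and, masks, np.ones(len(coords), dtype=bool))
coords = coords[keep]                  # prune, as coordreduce does for all its arrays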
Example 2: eliminatePercentileTails
def eliminatePercentileTails(self, mskDds, loPercentile=10.0, hiPercentile=90.0):
    """
    Trims lower and/or upper image histogram tails by replacing :samp:`mskDds`
    voxel values with :samp:`mskDds.mtype.maskValue()`.
    """
    rootLogger.info("Eliminating percentile tails...")
    rootLogger.info("Calculating element frequencies...")
    elems, counts = elemfreq(mskDds)
    rootLogger.info("elems:\n%s" % (elems,))
    rootLogger.info("counts:\n%s" % (counts,))
    cumSumCounts = sp.cumsum(counts, dtype="float64")
    percentiles = 100.0*(cumSumCounts/float(cumSumCounts[-1]))
    percentileElems = elems[sp.where(sp.logical_and(percentiles > loPercentile, percentiles < hiPercentile))]
    loThresh = percentileElems[0]
    hiThresh = percentileElems[-1]
    rootLogger.info("Masking percentiles range (%s,%s) = (%s,%s)" % (loPercentile, hiPercentile, loThresh, hiThresh))
    mskDds.asarray()[...] = \
        sp.where(
            sp.logical_and(
                sp.logical_and(mskDds.asarray() >= loThresh, mskDds.asarray() <= hiThresh),
                mskDds.asarray() != mskDds.mtype.maskValue()
            ),
            mskDds.asarray(),
            mskDds.mtype.maskValue()
        )
    rootLogger.info("Done eliminating percentile tails.")
Author: pymango | Project: pymango | Lines: 26 | Source: label_spherical_cavities.py
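eliminatePercentileTails derives its thresholds from an element-frequency table because mskDds is a distributed mango array; on a plain ndarray the same windowing reduces to numpy's percentile function plus one logical_and. A sketch:

import numpy as np

data = np.random.randn(10000)
lo, hi = np.percentile(data, [10.0, 90.0])  # thresholds for the (10, 90) window
trimmed = np.where(np.logical_and(data >= lo, data <= hi), data, np.nan)
# NaN plays the role of mskDds.mtype.maskValue() here.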
Example 3: main
def main():
    args = getArguments(getParser())
    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)
    # constants
    colours = {'i': 10, 'o': 11}
    # load volumes
    marker_data, _ = load(args.marker)
    contour_data, _ = load(args.contour)
    # perform check
    contour_data = contour_data == colours[args.type]
    marker_data_fg = marker_data == 1
    marker_data_bg = marker_data == 2
    if scipy.logical_and(contour_data, marker_data_fg).any():
        logger.warning('Intersection between {} and {} (type {}) in foreground.'.format(args.marker, args.contour, args.type))
    elif scipy.logical_and(contour_data, marker_data_bg).any():
        logger.warning('Intersection between {} and {} (type {}) in background.'.format(args.marker, args.contour, args.type))
    else:
        print "No intersection."
Author: AlexanderRuesch | Project: medpy | Lines: 25 | Source: medpy_check_marker_intersection.py
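The core of the check is simply whether two boolean volumes overlap anywhere; a toy-sized sketch of that test:

import numpy as np

contour = np.array([[0, 1], [1, 1]], dtype=bool)
marker_fg = np.array([[0, 0], [1, 0]], dtype=bool)
# logical_and(...).any() is True iff the masks share at least one True voxel;
# (contour & marker_fg).any() is an equivalent spelling.
if np.logical_and(contour, marker_fg).any():
    print('Intersection in foreground.')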
Example 4: getRegion
def getRegion(self,size=3e4,min_nSNPs=1,chrom_i=None,pos_min=None,pos_max=None):
    """
    Sample a region from the piece of genotype X, chrom, pos
    min_nSNPs: minimum number of SNPs contained in the region
    chrom_i: restrict X to chromosome chrom_i before taking the region
    cis: bool vector that marks the sorted region
    region: vector that contains chrom and the initial and final position of the region
    """
    if (self.chrom is None) or (self.pos is None):
        bim = plink_reader.readBIM(self.bfile,usecols=(0,1,2,3))
        chrom = SP.array(bim[:,0],dtype=int)
        pos = SP.array(bim[:,3],dtype=int)
    else:
        chrom = self.chrom
        pos = self.pos
    if chrom_i is None:
        n_chroms = chrom.max()
        chrom_i = int(SP.ceil(SP.rand()*n_chroms))
    pos = pos[chrom==chrom_i]
    chrom = chrom[chrom==chrom_i]
    ipos = SP.ones(len(pos),dtype=bool)
    if pos_min is not None:
        ipos = SP.logical_and(ipos,pos_min<pos)
    if pos_max is not None:
        ipos = SP.logical_and(ipos,pos<pos_max)
    pos = pos[ipos]
    chrom = chrom[ipos]
    if size==1:
        # select single SNP
        idx = int(SP.ceil(pos.shape[0]*SP.rand()))
        cis = SP.arange(pos.shape[0])==idx
        region = SP.array([chrom_i,pos[idx],pos[idx]])
    else:
        while 1:
            idx = int(SP.floor(pos.shape[0]*SP.rand()))
            posT1 = pos[idx]
            posT2 = pos[idx]+size
            if posT2<=pos.max():
                cis = chrom==chrom_i
                cis *= (pos>posT1)*(pos<posT2)
                if cis.sum()>min_nSNPs: break
        region = SP.array([chrom_i,posT1,posT2])
    start = SP.nonzero(cis)[0].min()
    nSNPs = cis.sum()
    if self.X is None:
        rv = plink_reader.readBED(self.bfile,useMAFencoding=True,start = start, nSNPs = nSNPs,bim=bim)
        Xr = rv['snps']
    else:
        Xr = self.X[:,start:start+nSNPs]
    return Xr, region
Author: PMBio | Project: limix | Lines: 59 | Source: simulator.py
Example 5: separate_cal
def separate_cal(data, n_bins_cal, cal_mask=None):
    """Function separates data into cal_on and cal_off.
    No guarantee that the data argument remains unchanged."""
    # Allocate memory for output
    ntime, npol, nfreq = data.shape
    n_bins_after_cal = ntime//n_bins_cal
    out_data = sp.zeros((n_bins_after_cal, npol, 2, nfreq), dtype=sp.float32)
    # Get the phase offset of the cal.
    try:
        if cal_mask is None:
            first_on, n_blank = get_cal_mask(data, n_bins_cal)
        else:
            first_on, n_blank = cal_mask
    except ce.DataError:
        print "Discarded record due to bad profile. "
        out_data[:] = float('nan')
    else:
        # How many samples for each cal state.
        n_cal_state = n_bins_cal//2 - n_blank
        first_off = (first_on + n_bins_cal//2) % n_bins_cal
        # Reshape data to add an index to average over.
        data.shape = (n_bins_after_cal, n_bins_cal) + data.shape[1:]
        # Get the masks for the on and off data.
        inds = sp.arange(n_bins_cal)
        if first_on == min((sp.arange(n_cal_state) + first_on) % n_bins_cal):
            on_mask = sp.logical_and(inds >= first_on,
                                     inds < first_on + n_cal_state)
        else:
            on_mask = sp.logical_or(inds >= first_on,
                                    inds < (first_on + n_cal_state) % n_bins_cal)
        if first_off == min((sp.arange(n_cal_state) + first_off) % n_bins_cal):
            off_mask = sp.logical_and(inds >= first_off,
                                      inds < first_off + n_cal_state)
        else:
            off_mask = sp.logical_or(inds >= first_off,
                                     inds < (first_off + n_cal_state) % n_bins_cal)
        # Find cal-on and cal-off averages.  Always use mean, not median, due
        # to discretization noise.
        # This loop is much faster than the built-in numpy mean() for some
        # reason.
        for ii in range(n_bins_cal):
            if on_mask[ii]:
                out_data[:,:,0,:] += data[:,ii,:,:]
            elif off_mask[ii]:
                out_data[:,:,1,:] += data[:,ii,:,:]
        out_data[:,:,0,:] /= sp.sum(on_mask)
        out_data[:,:,1,:] /= sp.sum(off_mask)
    return out_data
Author: adam-lewis | Project: analysis_IM | Lines: 58 | Source: psrfits_to_sdfits.py
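The on/off masks above handle the case where a cal window wraps past the end of the phase bins: a contiguous window is the AND of two inequalities, while a wrapped window becomes an OR. A small sketch with made-up numbers:

import numpy as np

n_bins = 8
inds = np.arange(n_bins)
# Contiguous window [2, 5): logical_and of the two inequalities.
on_contig = np.logical_and(inds >= 2, inds < 5)                 # [F F T T T F F F]
# A width-3 window starting at bin 6 wraps around to bin 0: logical_or instead.
on_wrapped = np.logical_or(inds >= 6, inds < (6 + 3) % n_bins)  # [T F F F F F T T]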
Example 6: _commonx
def _commonx(self, other, res='coarsest', source='linspace'):
    """Merge x-axis discretizations of this object and another.
    If source is "linspace", make a new uniform spacing.
    If source is "original", use one of the original discretizations.
    If res (resolution) is "self" or "this", use the resolution of this
    object.
    If res is "other", use the resolution of the other object.
    If res is "coarsest", use the coarsest discretization of the two
    objects.
    If res is "finest", use the finest discretization of the two objects.
    If res is "medium", use a medium discretization
    (implies source "linspace")."""
    # 2012-06-27 - 2013-06-24
    # if an empty function object is given
    if len(self) == 0 or len(other) == 0:
        return scipy.empty(shape=(0,))
    # determine extremal values
    min1, max1 = min(self.x), max(self.x)    # use self.box()[:2]
    min2, max2 = min(other.x), max(other.x)  # use other.box()[:2]
    newmin = max(min1, min2)
    newmax = min(max1, max2)
    # restrict both discretizations to the common interval
    cand1 = self.x[scipy.logical_and(self.x >= newmin, self.x <= newmax)]
    cand2 = other.x[scipy.logical_and(other.x >= newmin,
                                      other.x <= newmax)]
    if res is not None and 'other'.startswith(res):
        winner = cand2
    elif res is not None and \
            ('self'.startswith(res) or 'this'.startswith(res)):
        winner = cand1
    elif res is not None and 'finest'.startswith(res):
        winner = cand1 if len(cand1) > len(cand2) else cand2
    elif res is not None and 'medium'.startswith(res):
        source = 'linspace'
        winner = [0]*int(scipy.ceil(scipy.mean([len(cand1), len(cand2)])))
    else:
        winner = cand1 if len(cand1) < len(cand2) else cand2
    if source is not None and 'linspace'.startswith(source):
        newx = scipy.linspace(newmin, newmax, len(winner))
    else:
        # res may not be "medium" here!
        newx = winner
    return newx
Author: proggy | Project: cofunc | Lines: 51 | Source: __init__.py
Example 7: Dirac
def Dirac(x, sigma):
    from scipy import logical_and, pi, cos
    f = (1.0 / 2.0 / sigma) * (1 + cos(pi * x / sigma))
    b = logical_and(x <= sigma, x >= -sigma)
    f = f * b
    return f
Author: kelidas | Project: scratch | Lines: 7 | Source: fit_manual.py
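This is the cosine-smoothed Dirac delta familiar from level-set methods: the logical_and factor zeroes it outside [-sigma, sigma], and on that support it integrates to one. A quick numerical check of both properties (numpy stands in for the old scipy re-exports):

import numpy as np

def dirac(x, sigma):
    f = (1.0 / 2.0 / sigma) * (1 + np.cos(np.pi * x / sigma))
    return f * np.logical_and(x <= sigma, x >= -sigma)

x = np.linspace(-2.0, 2.0, 4001)
y = dirac(x, sigma=0.5)
print(y.sum() * (x[1] - x[0]))   # ~1.0: unit mass
print(y[np.abs(x) > 0.5].max())  # 0.0: compact support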
Example 8: getX
def getX(self,standardized=True,maf=None):
    """
    return SNPs, if necessary standardize them
    """
    X = SP.copy(self.X)
    # test for missing values
    isnan = SP.isnan(X)
    for i in isnan.sum(0).nonzero()[0]:
        # set to mean
        X[isnan[:,i],i] = X[~isnan[:,i],i].mean()
    if maf is not None:
        LG.debug('filter SNPs')
        LG.debug('... number of SNPs (before filtering): %d'%X.shape[1])
        idx_snps = SP.logical_and(X[self.idx_samples].mean(0)>0.1,X[self.idx_samples].mean(0)<0.9)
        LG.debug('... number of SNPs (after filtering) : %d'%idx_snps.sum())
    else:
        idx_snps = SP.ones(self.n_f,dtype=bool)
    if standardized:
        LG.debug('standardize SNPs')
        X = X[self.idx_samples][:,idx_snps]
        X -= X.mean(0)
        X /= X.std(0,dtype=NP.float32)
        X /= SP.sqrt(X.shape[1])
        return X
    return X[self.idx_samples][:,idx_snps]
Author: PMBio | Project: pygp_kronsum | Lines: 29 | Source: data.py
Example 9: DFT_PSD
def DFT_PSD(data,movingwin=[0.201, 0.051], Fs = 1000, pad=0, fpass=[1,100]):
    '''Discrete Fourier Transform PSD estimate.
    Input:
        data: np.array of shape time_window x trials
    '''
    num_trials = data.shape[1]
    N = data.shape[0] # ms of trials
    Nwin=round(Fs*movingwin[0])
    Nstep=round(Fs*movingwin[1])
    winstart=np.arange(0,N-Nwin,Nstep)
    nw=len(winstart)
    f = np.fft.fftfreq(int(movingwin[0]*Fs))
    f = Fs*f[f>=0]
    f_ind = scipy.logical_and(f>=fpass[0], f<=fpass[1])
    #set(f[f>=fpass[0]] ) & set(f[f<=fpass[1]])
    #f_ind = np.array(list(f_ind))
    S = np.zeros(( num_trials, nw, sum(f_ind) ))
    for n in range(nw):
        datawin=data[winstart[n]:winstart[n]+Nwin,:]
        sp = np.fft.rfft(datawin.T)
        psd_est = abs(sp)**2
        S[:,n,:] = psd_est[:,f_ind]
    t=(winstart+round(Nwin/2))/float(Fs)
    return S, f[f_ind], t
Author: pkhanna104 | Project: beta_extraction_code | Lines: 31 | Source: simple_spec.py
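A hedged usage sketch with synthetic data (it assumes DFT_PSD is defined as above, with its scipy/numpy imports in scope). The input is time x trials; the returned S is trials x windows x frequencies:

import numpy as np

t = np.arange(1000) / 1000.0  # 1000 ms at Fs = 1000 Hz
# 10 trials of a 20 Hz sinusoid plus noise.
data = np.sin(2 * np.pi * 20 * t)[:, None] + 0.5 * np.random.randn(1000, 10)
S, f, times = DFT_PSD(data, movingwin=[0.201, 0.051], Fs=1000, fpass=[1, 100])
print(S.shape)                            # (10, n_windows, n_freqs)
print(f[np.argmax(S.mean(axis=(0, 1)))])  # peak should land near 20 Hz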
Example 10: cut
def cut(self, range=None, lower=None, upper=None):
    """Cut away all data points whose x-value is outside of the given
    "range", or those that are smaller than "lower" or greater than
    "upper"."""
    # 2012-07-12
    # get range
    if range is None:
        # default range captures all values
        range = self.box()[:2]
    else:
        range = list(range)
        if scipy.array(range).shape != (2,):
            raise ValueError('range must be 2-tuple')
    # overwrite range with lower and upper value
    range = list(range)
    if lower is not None:
        range[0] = lower
    if upper is not None:
        range[1] = upper
    #if range[0] >= range[1]:
        #raise ValueError, 'lower bound must be smaller than upper bound'
        ### so then, nothing is kept, just fine
    # cut away data points
    keep = scipy.logical_and(self.x >= range[0], self.x <= range[1])
    self.x = self.x[keep]
    self.y = self.y[keep]
Author: proggy | Project: cofunc | Lines: 30 | Source: __init__.py
Example 11: learning_curve_metrics
def learning_curve_metrics(hdf_list, epoch_size=56, n_factors=5):
    #hdf_list = [3822, 3834, 3835, 3840]
    #obstacle learning: hdf_list = [4098, 4100, 4102, 4104, 4114, 4116, 4118, 4119]
    rew_ix_list = []
    te_refs = []
    rpm_list = []
    hdf_dict = {}
    perc_succ = []
    time_list = []
    offs = 0
    #f, ax = plt.subplots()
    for te in hdf_list:
        hdf_t = dbfn.TaskEntry(te)
        hdf = hdf_t.hdf
        hdf_dict[te] = hdf
        rew_ix, rpm = pa.get_trials_per_min(hdf, nmin=2, rew_per_min_cutoff=0,
            ignore_assist=True, return_rpm=True)
        ix = 0
        #ax.plot(rpm)
        trial_ix = np.array([i for i in hdf.root.task_msgs[:] if
            i['msg'] in ['reward','timeout_penalty','hold_penalty','obstacle_penalty']], dtype=hdf.root.task_msgs.dtype)
        while (ix+epoch_size) < len(rew_ix):
            start_rew_ix = rew_ix[ix]
            end_rew_ix = rew_ix[ix+epoch_size]
            msg_ix_mod = np.nonzero(scipy.logical_and(trial_ix['time']<=end_rew_ix, trial_ix['time']>start_rew_ix))[0]
            all_msg = trial_ix[msg_ix_mod]
            perc_succ.append(len(np.nonzero(all_msg['msg']=='reward')[0]) / float(len(all_msg)))
            rew_ix_list.append(rew_ix[ix:ix+epoch_size])
            rpm_list.append(np.mean(rpm[ix:ix+epoch_size]))
            te_refs.append(te)
            time_list.append((0.5*(start_rew_ix+end_rew_ix))+offs)
            ix += epoch_size
        offs = offs+len(hdf.root.task)
    #For each epoch, fit an FA model (stick w/ 5 factors for now):
    ratio = []
    for te, r_ix in zip(te_refs, rew_ix_list):
        print te, len(r_ix)
        update_bmi_ix = np.nonzero(np.diff(np.squeeze(hdf.root.task[:]['internal_decoder_state'][:, 3, 0])))[0] + 1
        bin_spk, targ_pos, targ_ix, z, zz = pa.extract_trials_all(hdf_dict[te], r_ix, time_cutoff=1000, update_bmi_ix=update_bmi_ix)
        zscore_X, mu = pa.zscore_spks(bin_spk)
        FA = skdecomp.FactorAnalysis(n_components=n_factors)
        FA.fit(zscore_X)
        #SOT variance ratio by target
        #private variance / mean
        Cov_Priv = np.sum(FA.noise_variance_)
        U = np.mat(FA.components_).T
        Cov_Shar = np.trace(U*U.T)
        ratio.append(Cov_Shar/(Cov_Shar+Cov_Priv))
Author: pkhanna104 | Project: fa_analysis | Lines: 59 | Source: prelim_analysis.py
Example 12: bias
def bias(a,b):
    '''
    Mean bias: mean(a) - mean(b) over pairwise-finite entries.
    '''
    a,b = sp.array(a),sp.array(b)
    mask = sp.logical_and(sp.isfinite(a),sp.isfinite(b))
    a, b = a[mask], b[mask]
    return a.mean()-b.mean()
Author: johannesro | Project: waveverification | Lines: 8 | Source: dataanalysis.py
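Usage sketch (assuming bias is defined as above, with sp supplied by an older SciPy or aliased to numpy): the logical_and of the two isfinite masks makes the comparison NaN-safe, so only pairwise-finite entries contribute.

import numpy as np

model = [1.0, 2.0, np.nan, 4.0]
obs = [0.5, np.nan, 3.0, 3.5]
# Only indices 0 and 3 are finite in both series:
print(bias(model, obs))  # mean([1.0, 4.0]) - mean([0.5, 3.5]) = 0.5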
Example 13: get_kin_sig_shenoy
def get_kin_sig_shenoy(kin_sig, bins=np.linspace(0,3000,3000), start_bin=1200, first_local_max_method=False,
        after_start_est=300+300, kin_est=1000, anim='seba'):
    kin_feat = np.zeros((kin_sig.shape[0], 5))
    for r in range(kin_sig.shape[0]):
        spd_after_go = kin_sig[r,start_bin:]
        if first_local_max_method: #Done only on BMI 3d, assuming Fs = 60 Hz.
            d_spd = np.diff(spd_after_go)
            #Est. number of bins RT should come after:
            aft = after_start_est/float(1000)*60 #aft is in iterations for bmi3d
            rch = kin_est/float(1000)*60 #rch is in iterations for bmi3d
            #Find first cross from + --> -
            max_ind = np.array([i for i, s in enumerate(d_spd[:-1]) if scipy.logical_and(s>0, d_spd[i+1]<0)]) #derivative crosses zero w/ negative slope
            z = np.nonzero(scipy.logical_and(max_ind>aft, max_ind<(rch+aft)))[0] #local maxima that fit estimate of rxn time --> rch time
            #How to choose:
            if len(z)>0:
                z_ind = np.argmax(spd_after_go[max_ind[z]]) #choose the biggest
                kin_feat[r,1] = bins[max_ind[z[z_ind]]+start_bin] #write down the time
                maxbin = max_ind[z[z_ind]]+start_bin
            else:
                print ' no local maxima found within range :/ '
                kin_feat[r,1] = bins[int(start_bin+aft+rch)]
                maxbin = start_bin+aft+rch
        else:
            kin_feat[r,1] = bins[ start_bin + np.argmax(spd_after_go) ]
            maxbin = start_bin + np.argmax(spd_after_go)
        kin_feat[r,0] = kin_sig[r,int(maxbin)]
        perc = [0.2, 0.5, 0.1]
        for p, per in enumerate(perc):
            percent0 = kin_feat[r,0]*per #bottom threshold
            indz = range(0, int(maxbin-start_bin)) #0 - argmax_index
            indzz = indz[-1:0:-1] #reversed
            datz = spd_after_go[indzz]
            try:
                x = np.nonzero(datz<percent0)[0][0]
            except:
                x = len(datz)
            kin_feat[r,2+p] = bins[int(maxbin-x)]
    return kin_feat
Author: pkhanna104 | Project: beta_extraction_code | Lines: 46 | Source: psycho_metrics.py
Example 14: get_trials_per_min
def get_trials_per_min(hdf,nmin=2, rew_per_min_cutoff=0, ignore_assist=False, return_rpm=False,
        return_per_succ=False, plot=False):
    '''
    Summary: get trials per minute from an hdf file
    Input param: hdf: hdf file to use
    Input param: nmin: number of minutes to use as a rectangular window
    Input param: rew_per_min_cutoff: ignore rew_ix after a
        certain rewards-per-minute low threshold is passed
    Output param: rew_ix = rewarded indices in hdf file
    '''
    rew_ix = np.array([t[1] for it, t in enumerate(hdf.root.task_msgs[:]) if t[0]=='reward'])
    tm = np.zeros((np.max(rew_ix)+1))
    tm[rew_ix] += 1
    if hasattr(hdf.root.task, 'assist_level'):
        assist_ix = np.nonzero(hdf.root.task[:]['assist_level']==0)[0]
    else:
        assist_ix = np.zeros((len(hdf.root.task)))
    #Each row occurs ~1/60 sec, so:
    minute = 60*60
    min_wind = np.ones((nmin*minute))/float(nmin)
    rew_per_min_tmp = np.convolve(min_wind, tm, mode='same')
    #Now smooth:
    smooth_wind = np.ones((3*minute))/float(3*minute)
    rew_per_min = pk_convolve(smooth_wind, rew_per_min_tmp)
    if rew_per_min_cutoff > 0:
        ix = np.nonzero(rew_per_min < rew_per_min_cutoff)[0]
        if len(ix)>0:
            cutoff_ix = ix[0]
        else:
            cutoff_ix = rew_ix[-1]
    else:
        cutoff_ix = rew_ix[-1]
    if ignore_assist:
        try:
            beg_zer_assist_ix = assist_ix[0]
        except:
            print 'No values w/o assist for filename: ', hdf.filename
            beg_zer_assist_ix = rew_ix[-1]+1
    else:
        beg_zer_assist_ix = 0
    if plot:
        plt.plot(np.arange(len(tm))/float(minute), rew_per_min)
        plt.show()
    ix_final = scipy.logical_and(rew_ix <= cutoff_ix, rew_ix >= beg_zer_assist_ix)
    if return_rpm:
        return rew_ix[ix_final], rew_per_min[rew_ix[ix_final]]
    else:
        return rew_ix[ix_final]
Author: pkhanna104 | Project: fa_analysis | Lines: 56 | Source: prelim_analysis.py
Example 15: filter_M_T
def filter_M_T(gmr_name, gmr_characteristics):
    if gmr_characteristics[0] == 'None':
        # gmr_characteristics has not been defined because filtering of aftershocks is not of interest
        return 'Inf'
    else:
        Mw_multiplier = gmr_characteristics[-1]
        [M_m, Tg_m, Mw, Tg] = find_M_T(gmr_name, gmr_characteristics)
        available_aftershocks = len(Mw[scipy.logical_and(Mw<M_m*Mw_multiplier, Tg<Tg_m)])
        return available_aftershocks
Author: dynaryu | Project: rmtk | Lines: 10 | Source: double_MSA_on_SDOF.py
Example 16: makehist
def makehist(testpath,npulses):
    """
    Creates histograms from the data in testpath.
    Inputs
        testpath - The path where the data is located.
        npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne', 'Te', 'Ti', 'Vi']
    histlims = [[1e10, 3e11], [1000., 3000.], [100., 2500.], [-400., 400.]]
    erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800], [-400., 400.]]
    erperlims = [[-100., 100.]]*4
    lims_list = [histlims, erlims, erperlims]
    errdict = makehistdata(params, testpath)[:4]
    ernames = ['Data', 'Error', 'Error Percent']
    # Two-dimensional histograms
    pcombos = [i for i in itertools.combinations(params, 2)]
    c_rows = int(math.ceil(float(len(pcombos))/2.))
    (figmplf, axmat) = plt.subplots(c_rows, 2, figsize=(12, c_rows*6), facecolor='w')
    axvec = axmat.flatten()
    for icomn, icom in enumerate(pcombos):
        curax = axvec[icomn]
        str1, str2 = icom
        _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2], figmplf, curax)
    filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist'))
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20)
    fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
    plt.savefig(fname)
    plt.close(figmplf)
    # One-dimensional histograms
    for ierr, iername in enumerate(ernames):
        filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername))
        (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            binlims = lims_list[ierr][ipn]
            bins = sp.linspace(binlims[0], binlims[1], 100)
            xdata = errdict[ierr][iparam]
            xlog = sp.logical_and(xdata >= binlims[0], xdata < binlims[1])
            histhand = sns.distplot(xdata[xlog], bins=bins, kde=True, rug=False)
            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername +' Pulses: {0}'.format(npulses), fontsize=20)
        fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
Author: jswoboda | Project: RadarDataSim | Lines: 55 | Source: statstest.py
Example 17: nonna_select_data
def nonna_select_data(data, outlier_threshold, level='high'):
    """
    Returns an index of the selected (good) data points after identifying the
    main outliers. It applies a cut to the data to remove exactly a fraction
    (1 - outlier_threshold) of all data points. By default the cut is applied
    only at the high end of the data values, but the parameter level can be
    used to change this.
    Input arguments:
    data = vector containing all data points
    outlier_threshold = remove outliers until we are left with exactly this fraction of the
                        original data
    level = 'high|low|both' determines if the outliers are removed from the
            high-value end, the low-value end, or both ends.
    Output:
    idx = index of selected (good) data
    """
    # histogram all the data values
    n,x = scipy.histogram(data, len(data)/10)
    # compute the cumulative distribution and normalize
    nn = scipy.cumsum(n)
    nn = nn / float(max(nn))
    if level=='high':
        # select the value such that a fraction outlier_threshold of the data lies below it
        if outlier_threshold < 1:
            val = x[pylab.find(nn/float(max(nn)) >= outlier_threshold)[0]]
        else:
            val = max(data)
        # use that fraction of data only
        idx = data <= val
    elif level=='low':
        # select the value such that a fraction outlier_threshold of the data lies above it
        if outlier_threshold < 1:
            val = x[pylab.find(nn/float(max(nn)) <= (1-outlier_threshold))[-1]]
        else:
            val = min(data)
        # use that fraction of data only
        idx = data >= val
    elif level=='both':
        # select the value such that a fraction (1-outlier_threshold)/2 of the data lies above it
        if outlier_threshold < 1:
            Hval = x[pylab.find(nn/float(max(nn)) >= 1-(1-outlier_threshold)/2)[0]]
        else:
            Hval = max(data)
        # select the value such that a fraction (1-outlier_threshold)/2 of the data lies below it
        if outlier_threshold < 1:
            Lval = x[pylab.find(nn/float(max(nn)) <= (1-outlier_threshold)/2)[-1]]
        else:
            Lval = min(data)
        # use that fraction of data only
        idx = scipy.logical_and(data >= Lval, data <= Hval)
    return idx
Author: TristanShoemaker | Project: VIRGO_NONNA | Lines: 55 | Source: nonna_functions.py
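For reference, the two-sided branch is equivalent (up to histogram-binning granularity) to cutting at symmetric quantiles; a sketch of the same selection with plain numpy:

import numpy as np

data = np.random.randn(5000)
outlier_threshold = 0.95  # keep this fraction of the data
lval, hval = np.quantile(data, [(1 - outlier_threshold) / 2,
                                1 - (1 - outlier_threshold) / 2])
idx = np.logical_and(data >= lval, data <= hval)
print(idx.mean())  # ~0.95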
Example 18: get_scan_IF_inds
def get_scan_IF_inds(self, scan_ind, IF_ind):
    """Gets the record indices of the fits file that correspond to the
    given scan and IF.
    Note that the scans are numbered with 0 corresponding to the first scan
    in the file, i.e., it is not the session scan number."""
    # TODO: Should check for a valid scan and IF and raise ValueErrors as appropriate
    thescan = self.scan_set[scan_ind]
    theIF = self.IF_set[IF_ind]
    # Find all the records that correspond to this IF and this scan.
    # These indices *should* now be ordered in time, cal (on, off)
    # and in polarization, once the IF is isolated.
    (inds_sif,) = sp.where(sp.logical_and(self._IFs_all==theIF,
                                          self._scans_all==thescan))
    ncal = len(sp.unique(self.fitsdata.field('CAL')[inds_sif]))
    npol = len(sp.unique(self.fitsdata.field('CRVAL4')[inds_sif]))
    # Reform to organize by pol, cal, etc.
    ntimes = len(inds_sif)//npol//ncal
    inds_sif = sp.reshape(inds_sif, (ntimes, npol, ncal))
    if self.verify_ordering > 0:
        # We expect noise cal to be on for every second record.
        for thecal in range(ncal):
            tmp = sp.unique(self.fitsdata.field('CAL')[inds_sif[:,:,thecal]])
            if len(tmp) > 1:
                raise ce.DataError("Calibration (ON/OFF) not in "
                                   "perfect order in file: "+self.fname)
        # Polarization should cycle through 4 modes (-5,-7,-8,-6)
        for thepol in range(npol):
            tmp = sp.unique(self.fitsdata.field('CRVAL4')
                            [inds_sif[:,thepol,:]])
            if len(tmp) > 1:
                raise ce.DataError("Polarizations not in perfect order in "
                                   "file: "+self.fname)
        # We expect the entries to be sorted in time and for time to not
        # change across pol and cal.
        lastLST = 0
        for ii in range(ntimes):
            # Sometimes won't have the LST.
            try:
                thisLST = self.fitsdata.field('LST')[inds_sif[ii,0,0]]
            # If 'LST' is missing this raises a KeyError in later versions of
            # pyfits, and a NameError in earlier ones.
            except (KeyError, NameError):
                break
            if not (sp.allclose(self.fitsdata.field('LST')
                                [inds_sif[ii,:,:]] - thisLST, 0)):
                raise ce.DataError("LST change across cal or pol in "
                                   "file: " + self.fname)
    return inds_sif
Author: adam-lewis | Project: analysis_IM | Lines: 54 | Source: fitsGBT.py
Example 19: timereduce
def timereduce(self, timelims=None, timesselected=None):
    assert (timelims is not None) or (timesselected is not None), "Need a set of limits or selected set of times"
    if timelims is not None:
        tkeep = sp.logical_and(self.Time_Vector>=timelims[0],self.Time_Vector<timelims[1])
    if timesselected is not None:
        tkeep = sp.in1d(self.Time_Vector,timesselected)
    # prune the arrays
    self.Time_Vector=self.Time_Vector[tkeep]
    self.Param_List=self.Param_List[:,tkeep]
    self.Velocity=self.Velocity[:,tkeep]
Author: hhuangmeso | Project: RadarDataSim | Lines: 11 | Source: IonoContainer.py
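timereduce shows the two common selection modes side by side: logical_and for a half-open time window, in1d for membership in an explicit list of times. A standalone sketch of the difference:

import numpy as np

times = np.arange(0.0, 10.0, 0.5)
by_range = np.logical_and(times >= 2.0, times < 5.0)  # half-open window, like timelims
by_set = np.in1d(times, [2.0, 3.5, 9.0])              # explicit times, like timesselected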
Example 20: evaluate
def evaluate(self, rotMatrix):
    """
    Evaluate the correlation for the given orientation (rotation).
    :rtype: :obj:`float`
    :return: Correlation of normalised bin counts.
    """
    trnsCoords = rotMatrix.dot(self.trnsCoords.T).T
    fixdDensity = self.fixdSphHist.getBinCounts(trnsCoords)
    msk = sp.where(sp.logical_and(fixdDensity > self.fixdLoThrsh, fixdDensity <= self.fixdHiThrsh))
    return sp.stats.pearsonr(self.trnsDensity[msk], fixdDensity[msk])[0]
Author: pymango | Project: pymango | Lines: 11 | Source: _sphericalHistRegistration.py
Note: The scipy.logical_and function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many programmers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.