This article collects typical usage examples of the scipy.sort function in Python. If you have been wondering what scipy.sort does, how it is called in practice, or what real-world code that uses it looks like, the hand-picked examples below should help.
A total of 20 scipy.sort code examples are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
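Note that scipy.sort was simply a re-export of numpy.sort, and recent SciPy releases no longer provide it, so every call below can be read as a numpy.sort call. A minimal sketch of the behaviour the examples rely on (values are illustrative):

import numpy as np

a = np.array([3, 1, 2])
print(np.sort(a))             # [1 2 3] -- returns a sorted copy; the input is not modified
m = np.array([[3, 1], [2, 4]])
print(np.sort(m, axis=0))     # sorts each column independently, like scipy.sort(m, 0)
print(np.sort(m, axis=None))  # flattens first, then sorts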
Example 1: dataSubset
def dataSubset(fittingData,numDatapoints,seed=345,maxNumIndepParams=None):
    """
    By default, add one timepoint for each independent parameter first,
    then increase the number of timepoints per independent parameter.
    Timepoints are added randomly for each independent parameter.
    Independent parameters are added in the order of indepParamsList.
    """
    scipy.random.seed(seed)
    subset = []
    numIndepParams = len(fittingData)
    if maxNumIndepParams is None: maxNumIndepParams = numIndepParams
    numDatapoints = int(numDatapoints)
    for i in range(min(numDatapoints,maxNumIndepParams)):
        varNames = scipy.sort( fittingData[i].keys() )
        allTimes = scipy.sort( fittingData[i][varNames[0]].keys() )
        possibleIndices = range(len(allTimes))
        scipy.random.shuffle(possibleIndices)
        N = numDatapoints/maxNumIndepParams
        if i < numDatapoints%maxNumIndepParams: N += 1
        timeIndices = possibleIndices[:N]
        times = scipy.array(allTimes)[timeIndices]
        s = {}
        for var in varNames:
            s[var] = dict([(t,fittingData[i][var][t]) for t in times])
        subset.append(s)
    return subset
Developer: EmoryUniversityTheoreticalBiophysics | Project: SirIsaac | Lines: 30 | Source: fitAllParallel.py
Example 2: check
def check( x ):
    y = sl.canonicalise( x )
    yr = y[0,:]
    yc = y[:,0]

    assert all( yr == sc.sort( yr ) )
    assert all( yc == sc.sort( yc ) )
Developer: arunchaganty | Project: spectral | Lines: 7 | Source: tests.py
Example 3: quantify_intron_retention
def quantify_intron_retention(event, gene, counts_segments, counts_edges, counts_seg_pos):

    cov = sp.zeros((2, ), dtype='float')

    sg = gene.splicegraph
    segs = gene.segmentgraph

    seg_lens = segs.segments[1, :] - segs.segments[0, :]
    seg_shape = segs.seg_edges.shape
    order = 'C'
    offset = 0

    ### find exons corresponding to event
    idx_exon1 = sp.where((sg.vertices[0, :] == event.exons1[0, 0]) & (sg.vertices[1, :] == event.exons1[0, 1]))[0]
    idx_exon2 = sp.where((sg.vertices[0, :] == event.exons1[1, 0]) & (sg.vertices[1, :] == event.exons1[1, 1]))[0]

    ### find segments corresponding to exons
    seg_exon1 = sp.sort(sp.where(segs.seg_match[idx_exon1, :])[1])
    seg_exon2 = sp.sort(sp.where(segs.seg_match[idx_exon2, :])[1])

    seg_all = sp.arange(seg_exon1[0], seg_exon2[-1])
    seg_intron = sp.setdiff1d(seg_all, seg_exon1)
    seg_intron = sp.setdiff1d(seg_intron, seg_exon2)
    assert(seg_intron.shape[0] > 0)

    ### compute exon coverages as mean of position wise coverage
    # intron_cov
    cov[0] = sp.sum(counts_segments[seg_intron] * seg_lens[seg_intron]) / sp.sum(seg_lens[seg_intron])

    ### check intron confirmation as sum of valid intron scores
    ### intron score is the number of reads confirming this intron
    # intron conf
    idx = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon1[-1], seg_exon2[0]], seg_shape, order=order) + offset)[0]
    cov[1] = counts_edges[idx, 1]

    return cov
Developer: ratschlab | Project: spladder | Lines: 35 | Source: quantify.py
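The edge lookup in the spladder snippets (Examples 3, 7 and 15) relies on sp.ravel_multi_index to convert a (row, column) position in the segment-edge matrix into the linear edge id stored in counts_edges[:, 0]. A tiny self-contained illustration of just that step, using a hypothetical 5x5 edge matrix:

import numpy as np

seg_shape = (5, 5)  # hypothetical shape of the segment-edge matrix
edge_id = np.ravel_multi_index([1, 3], seg_shape, order='C')
print(edge_id)      # 8, i.e. row 1 * 5 columns + column 3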
Example 4: _remdup
def _remdup(a,amax=None):
    """Remove duplicates from vector a
    """
    a = scipy.sort(a)  # scipy.sort returns a sorted copy; assign it so the scan below sees sorted data
    flag = 0
    for x in range(1,len(a)):
        if (a[x-1]+1) - (a[x]+1) == 0:  # equivalent to a[x-1] == a[x]
            flag = 1
    return flag
Developer: myw | Project: dataiap | Lines: 9 | Source: genetic.py
Example 5: QQPlot
def QQPlot(arguments,pv,unique_pv,fname):
    font_size = 18
    mpl.rcParams['font.family']="sans-serif"
    mpl.rcParams['font.sans-serif']="Arial"
    mpl.rcParams['font.size']=font_size
    #mpl.rcParams['figure.dpi'] = 300
    mpl.rcParams['font.weight']='medium'
    mpl.rcParams['figure.facecolor'] = 'white'
    mpl.rcParams['lines.linewidth'] = 1
    mpl.rcParams['axes.facecolor'] = 'white'
    mpl.rcParams['patch.edgecolor'] = 'white'
    mpl.rcParams['grid.linestyle'] = '-'
    mpl.rcParams['grid.color'] = 'LightGray'

    if arguments.ignore!=None:
        if arguments.ignore in fname:
            return
    if arguments.distinct:
        pv = unique_pv

    pl.figure(figsize=(5,5))
    pv_uni = (sp.arange(1.0/float(pv.shape[0]),1,1.0/float(pv.shape[0]+1)))
    pl.plot(-sp.log10(pv_uni),-sp.log10(sp.sort(pv_uni)),'b--')
    pl.ylim(0,(-sp.log10(pv[:])).max()+1)
    pl.plot(-sp.log10(pv_uni),-sp.log10(sp.sort(pv[:],axis=0)),'.',color="#F68E55",markersize=12,markeredgewidth=0,alpha=1)

    #plot theoretical expectations
    if arguments.estpv:
        datapoints=10**(sp.arange(sp.log10(0.5),sp.log10(pv.shape[0]-0.5)+0.1,0.1))
        beta_alpha=sp.zeros(datapoints.shape[0])
        beta_nalpha=sp.zeros(datapoints.shape[0])
        beta_tmp=sp.zeros(datapoints.shape[0])
        for n in xrange(datapoints.shape[0]):
            m=datapoints[n]
            beta_tmp[n]=stats.beta.ppf(0.5,m,pv.shape[0]-m)
            beta_alpha[n]=stats.beta.ppf(0.05,m,pv.shape[0]-m)
            beta_nalpha[n]=stats.beta.ppf(1-0.05,m,pv.shape[0]-m)
        estimated_pvals=datapoints/pv.shape[0]
        lower_bound = -sp.log10(estimated_pvals-(beta_tmp-beta_alpha))
        upper_bound = -sp.log10(estimated_pvals+(beta_nalpha-beta_tmp))
        pl.fill_between(-sp.log10(estimated_pvals),lower_bound,upper_bound,color='#00BFF3',alpha=0.4,linewidth=0)

    if arguments.title:
        pl.title("Phenotype: %s"%(fname))
    pl.xlabel('Expected $-log10(p-value)$')
    pl.ylabel('Observed $-log10(p-value)$')
    if arguments.gc:
        gc = sp.median(stats.chi2.isf(pv,1))/0.456
        pl.text(4,1,"$\hat \lambda=%.2f$"%(gc))
    remove_border()
    pl.subplots_adjust(left=0.14,bottom=0.13,right=0.97,top=0.95,wspace=0.45)
    pl.savefig(os.path.join(arguments.out,'qqplot_' + fname + '.' + arguments.iformat) )
    pl.close()
Developer: dominikgrimm | Project: easyGWASCore | Lines: 51 | Source: plotting.py
Example 6: get_coords
def get_coords(self, trafo=False):
    if self.event_type != 'mult_exon_skip':
        if trafo:
            #return sp.sort(sp.unique(sp.c_[self.exons1_col.ravel(), self.exons2_col.ravel()]))
            return sp.sort(sp.r_[self.exons1_col.ravel(), self.exons2_col.ravel()])
        else:
            #return sp.sort(sp.unique(sp.c_[self.exons1.ravel(), self.exons2.ravel()]))
            return sp.sort(sp.r_[self.exons1.ravel(), self.exons2.ravel()])
    else:
        if trafo:
            return sp.sort(sp.r_[self.exons1_col.ravel()[:4], self.exons2_col.ravel()[-4:]])
        else:
            return sp.sort(sp.r_[self.exons1.ravel()[:4], self.exons2.ravel()[-4:]])
Developer: jiahsinhuang | Project: spladder | Lines: 14 | Source: event.py
Example 7: quantify_mult_exon_skip
def quantify_mult_exon_skip(event, gene, counts_segments, counts_edges):

    cov = sp.zeros((2, ), dtype='float')

    sg = gene.splicegraph
    segs = gene.segmentgraph

    seg_lens = segs.segments[1, :] - segs.segments[0, :]
    seg_shape = segs.seg_edges.shape[0]
    order = 'C'
    offset = 0

    ### find exons corresponding to event
    idx_exon_pre = sp.where((sg.vertices[0, :] == event.exons2[0, 0]) & (sg.vertices[1, :] == event.exons2[0, 1]))[0]
    idx_exon_aft = sp.where((sg.vertices[0, :] == event.exons2[-1, 0]) & (sg.vertices[1, :] == event.exons2[-1, 1]))[0]
    seg_exons = []
    for i in range(1, event.exons2.shape[0] - 1):
        tmp = sp.where((sg.vertices[0, :] == event.exons2[i, 0]) & (sg.vertices[1, :] == event.exons2[i, 1]))[0]
        seg_exons.append(sp.where(segs.seg_match[tmp, :])[1])

    ### find segments corresponding to exons
    seg_exon_pre = sp.sort(sp.where(segs.seg_match[idx_exon_pre, :])[1])
    seg_exon_aft = sp.sort(sp.where(segs.seg_match[idx_exon_aft, :])[1])
    seg_exons_u = sp.sort(sp.unique([x for sublist in seg_exons for x in sublist]))

    ### inner exons_cov
    cov[0] = sp.sum(counts_segments[seg_exons_u] * seg_lens[seg_exons_u]) / sp.sum(seg_lens[seg_exons_u])

    ### check intron confirmation as sum of valid intron scores
    ### intron score is the number of reads confirming this intron
    # exon_pre_exon_conf
    idx1 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exons[0][0]], seg_shape, order=order) + offset)[0]
    if len(idx1.shape) > 0 and idx1.shape[0] > 0:
        cov[0] += counts_edges[idx1[0], 1]
    # exon_exon_aft_conf
    idx2 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exons[-1][-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx2.shape) > 0 and idx2.shape[0] > 0:
        cov[0] += counts_edges[idx2[0], 1]
    # exon_pre_exon_aft_conf
    idx3 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx3.shape) > 0 and idx3.shape[0] > 0:
        cov[1] = counts_edges[idx3[0], 1]
    for i in range(len(seg_exons) - 1):
        # sum_inner_exon_conf
        idx4 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exons[i][-1], seg_exons[i+1][0]], seg_shape, order=order) + offset)[0]
        if len(idx4.shape) > 0 and idx4.shape[0] > 0:
            cov[0] += counts_edges[idx4[0], 1]

    return cov
Developer: ratschlab | Project: spladder | Lines: 50 | Source: quantify.py
Example 8: NGorN50
def NGorN50(file_path='contigs.txt', genomesize=None):
    contigs, num_contig = file_parser(file_path)
    print( "Total number of contigs: %d " %(num_contig) ) # Expect 20

    # Sort the contigs in reverse order in an array e.g.
    # array([79, 23, 10])
    contigs = scipy.sort(contigs)[::-1]
    #print(contigs)

    # Calculate sum to compare against for N50s or NG50
    if genomesize == None:
        contig_sum = contigs.sum()/2
        print( "50 Contig Sum is: %d" % (contig_sum) )
    else:
        contig_sum = int(genomesize)/2
        print ("50 Genome Size specified: %d" %(contig_sum))

    for counter in range(1, num_contig+1):
        # TODO: Consider memoizing this if you need to reuse this script for large contigs for performance gains.
        # Check the accumulated sum against the comparison
        if contigs[0:counter].sum() > contig_sum:
            print( "Partial Contig Sum is: %d, with counter: %d, and contig length %d"
                   % (contigs[0:counter].sum(), counter, contigs[counter-1]) )
            # Only need to find the first case
            break
Developer: djphan | Project: N50-Calculator | Lines: 26 | Source: N50_Calculator.py
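The descending sort in Example 8 (scipy.sort(contigs)[::-1]) is the core of the N50 calculation; the rest of the function just walks the cumulative sum. A compact, self-contained sketch of the same idea using numpy (the function name and test data are illustrative only):

import numpy as np

def n50(contig_lengths):
    # Sort descending, then return the first length at which the running
    # total reaches half of the total assembly size.
    lengths = np.sort(contig_lengths)[::-1]
    half_total = lengths.sum() / 2.0
    return lengths[np.searchsorted(np.cumsum(lengths), half_total)]

print(n50([79, 23, 10]))  # 79 -- the largest contig alone covers half of 112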
Example 9: generateNoteLength
def generateNoteLength(self):
    length = (60. / self.wavetempo) * self.time_freq_fs
    note_length = sp.array([2**i for i in range(5)]) / 4.
    note_length *= length
    note_huten = sp.array(
        [note_length[i-1]+note_length[i] for i in range(1, 5)])
    note_length = sp.r_[note_length, note_huten]
    note_length = sp.sort(note_length)

    note_length_pair = []
    for i in range(note_length.size):
        try:
            upper = (note_length[i+1] - note_length[i])/2
            upper += note_length[i]
        except IndexError:
            upper = note_length[i] * 2
        try:
            lower = note_length_pair[-1][1]
        except IndexError:
            lower = 0
        note_length_pair.append((lower, upper))

    if(self.output_form == 'MML'):
        note_name = ['16', '16.', '8', '8.', '4', '4.', '2', '2.', '1']
    elif(self.output_form == 'PMX'):
        note_name = ['1', '1d', '8', '8d', '4', '4d', '2', '2d', '0']

    return (note_name, note_length_pair)
Developer: mackee | Project: utakata | Lines: 27 | Source: utakata_time_freq.py
Example 10: bulk_bands_calculator
def bulk_bands_calculator(self,s,sub,kx,ky,kz):
    ''' Calculate the band energies for the specified kx, ky, and kz values.
        The 3x3 Hamiltonian for wurtzite crystals is used for the valence,
        while a 1x1 Hamiltonian is used for the conduction band. The model is
        from the chapter by Vurgaftman and Meyer in the book by Piprek.
    '''
    E = scipy.zeros((4,len(s.Eg0)))
    E[0,:] = s.Eg0+s.delcr+s.delso/3+\
             hbar**2/(2*s.mepara)*(kx**2+ky**2)+\
             hbar**2/(2*s.meperp)*(kz**2)+\
             (s.a1+s.D1)*s.epszz+(s.a2+s.D2)*(s.epsxx+s.epsyy)
    L = hbar**2/(2*m0)*(s.A1*kz**2+s.A2*(kx+ky)**2)+\
        s.D1*s.epszz+s.D2*(s.epsxx+s.epsyy)
    T = hbar**2/(2*m0)*(s.A3*kz**2+s.A4*(kx+ky)**2)+\
        s.D3*s.epszz+s.D4*(s.epsxx+s.epsyy)
    F = s.delcr+s.delso/3+L+T
    G = s.delcr-s.delso/3+L+T
    K = hbar**2/(2*m0)*s.A5*(kx+1j*ky)**2+s.D5*(s.epsxx-s.epsyy)
    H = hbar**2/(2*m0)*s.A6*(kx+1j*ky)*kz+s.D6*(s.epsxz)
    d = scipy.sqrt(2)*s.delso/3
    for ii in range(len(s.Eg0)):
        mat = scipy.matrix([[ F[ii],     K[ii],           -1j*H[ii]       ],
                            [ K[ii],     G[ii],           -1j*H[ii]+d[ii] ],
                            [-1j*H[ii], -1j*H[ii]+d[ii],   L[ii]          ]])
        w,v = scipy.linalg.eig(mat)
        E[1:,ii] = scipy.flipud(scipy.sort(scipy.real(w)))
    return E
Developer: puluoning | Project: ledsim | Lines: 27 | Source: material.py
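A standalone illustration of the ordering idiom in Example 10, where scipy.flipud(scipy.sort(scipy.real(w))) arranges the eigenvalues from largest to smallest (the 3x3 matrix below is an arbitrary stand-in, not a physical Hamiltonian):

import numpy as np
from scipy import linalg

mat = np.array([[2.0, 1.0, 0.0],
                [1.0, 3.0, 1.0],
                [0.0, 1.0, 1.0]])
w, v = linalg.eig(mat)
print(np.flipud(np.sort(np.real(w))))  # eigenvalues in descending order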
Example 11: termFrequencyMatrix
def termFrequencyMatrix(directory,stopwords,termlist):
    """ The student must code this. """
    filenames = sp.sort(os.listdir(directory))
    frequencyMatrix = sp.zeros((len(termlist),len(filenames)))
    for i in xrange(len(filenames)):
        frequencyMatrix[:,i] = termVector(directory + filenames[i],stopwords,termlist)
    return frequencyMatrix.astype(float)
Developer: KathleenF | Project: numerical_computing | Lines: 7 | Source: LSI.py
Example 12: traj_ensemble_quantiles
def traj_ensemble_quantiles(traj_set, quantiles=(0.025, 0.5, 0.975)):
    """
    Return a list of trajectories, each one corresponding to a given passed-in
    quantile.
    """
    all_values = scipy.array([traj.values for traj in traj_set])
    sorted_values = scipy.sort(all_values, 0)

    q_trajs = []
    for q in quantiles:
        # Calculate the index corresponding to this quantile. The q is because
        # Python arrays are 0 indexed
        index = q * (len(sorted_values) - 1)
        below = int(scipy.floor(index))
        above = int(scipy.ceil(index))
        if above == below:
            q_values = sorted_values[below]
        else:
            # Linearly interpolate...
            q_below = (1.0*below)/(len(sorted_values)-1)
            q_above = (1.0*above)/(len(sorted_values)-1)
            q_values = sorted_values[below] + (q - q_below)*(sorted_values[above] - sorted_values[below])/(q_above - q_below)
        q_traj = copy.deepcopy(traj_set[0])
        q_traj.values = q_values
        q_trajs.append(q_traj)

    return q_trajs
Developer: Colbert-Sesanker | Project: Networks | Lines: 27 | Source: Ensembles.py
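For a plain 2-D array of ensemble values, the sort-and-interpolate scheme of Example 12 reproduces numpy's default linear quantile interpolation. A quick check of that equivalence on toy random data (numpy stands in for the scipy aliases):

import numpy as np

values = np.random.rand(200, 5)          # 200 ensemble members, 5 timepoints
sorted_values = np.sort(values, axis=0)  # same role as scipy.sort(all_values, 0)

q = 0.975
index = q * (len(sorted_values) - 1)
below, above = int(np.floor(index)), int(np.ceil(index))
# Linear interpolation between the two bracketing order statistics
q_values = sorted_values[below] + (index - below) * (sorted_values[above] - sorted_values[below])
assert np.allclose(q_values, np.percentile(values, 100 * q, axis=0))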
Example 13: draw_graph
def draw_graph(graph):
    # create networkx graph
    G=nx.Graph()

    ordered_node_list = scipy.sort([int(i[1::]) for i in graph])

    # add nodes
    #for node in graph:
    #    G.add_node(node)
    for num in ordered_node_list:
        G.add_node('n'+str(num))

    # add edges
    for i in graph:
        for j in graph[i][1::]:
            G.add_edge(i,j)

    colors = ['b','r','g','c','w','k']
    node_color = [colors[graph[node][0]] for node in graph]

    # draw graph
    #pos = nx.shell_layout(G)
    pos = nx.spring_layout(G,iterations=100)
    nx.draw(G, pos, node_color = node_color)

    # show graph
    plt.axis('off')
    plt.show()
Developer: atombear | Project: kami_solver | Lines: 31 | Source: KamiSolve.py
Example 14: subsetsWithFits
def subsetsWithFits(fileNumString,onlyNew=False):
    """
    Find data subsets (N) that have models that have been fit to
    all conditions.

    onlyNew (False)     : Optionally include only subsets that have
                          fits that are not included in the current
                          combined fitProbs.
    """
    fpd = loadFitProbData(fileNumString)
    saveFilename = fpd.values()[0]['saveFilename']

    Nlist = []
    for N in scipy.sort(fpd.keys()):
        # find models that have been fit to all conditions
        if len(fpd[N]['fitProbDataList']) == 1:
            fitModels = fpd[N]['fitProbDataList'][0]['logLikelihoodDict'].keys()
        else:
            fitModels = scipy.intersect1d([ fp['logLikelihoodDict'].keys() \
                                            for fp in fpd[N]['fittingProblemList'] ])
        if onlyNew:
            Nfilename = directoryPrefixNonly(fileNumString,N)+'/'+saveFilename
            fileExists = os.path.exists(Nfilename)
            if not fileExists: # no combined file exists
                if len(fitModels) > 0:
                    Nlist.append(N)
            else: # check which fit models are currently included in the saved file
                fpMultiple = load(Nfilename)
                fitModelsSaved = fpMultiple.logLikelihoodDict.keys()
                if len(scipy.intersect1d(fitModels,fitModelsSaved)) < len(fitModels):
                    Nlist.append(N)
        else:
            if len(fitModels) > 0:
                Nlist.append(N)
    return Nlist
Developer: EmoryUniversityTheoreticalBiophysics | Project: SirIsaac | Lines: 35 | Source: fitAllParallel.py
Example 15: quantify_mutex_exons
def quantify_mutex_exons(event, gene, counts_segments, counts_edges):

    cov = sp.zeros((2, ), dtype='float')  # coverage/confirmation values for exon1 (cov[0]) and exon2 (cov[1])

    sg = gene.splicegraph
    segs = gene.segmentgraph

    seg_lens = segs.segments[1, :] - segs.segments[0, :]
    seg_shape = segs.seg_edges.shape[0]
    order = 'C'
    offset = 0

    ### find exons corresponding to event
    idx_exon_pre = sp.where((sg.vertices[0, :] == event.exons1[0, 0]) & (sg.vertices[1, :] == event.exons1[0, 1]))[0]
    idx_exon_aft = sp.where((sg.vertices[0, :] == event.exons1[-1, 0]) & (sg.vertices[1, :] == event.exons1[-1, 1]))[0]
    idx_exon1 = sp.where((sg.vertices[0, :] == event.exons1[1, 0]) & (sg.vertices[1, :] == event.exons1[1, 1]))[0]
    idx_exon2 = sp.where((sg.vertices[0, :] == event.exons2[1, 0]) & (sg.vertices[1, :] == event.exons2[1, 1]))[0]

    ### find segments corresponding to exons
    seg_exon_pre = sp.sort(sp.where(segs.seg_match[idx_exon_pre, :])[1])
    seg_exon_aft = sp.sort(sp.where(segs.seg_match[idx_exon_aft, :])[1])
    seg_exon1 = sp.sort(sp.where(segs.seg_match[idx_exon1, :])[1])
    seg_exon2 = sp.sort(sp.where(segs.seg_match[idx_exon2, :])[1])

    # exon1 cov
    cov[0] = sp.sum(counts_segments[seg_exon1] * seg_lens[seg_exon1]) / sp.sum(seg_lens[seg_exon1])
    # exon2 cov
    cov[1] = sp.sum(counts_segments[seg_exon2] * seg_lens[seg_exon2]) / sp.sum(seg_lens[seg_exon2])

    ### check intron confirmation as sum of valid intron scores
    ### intron score is the number of reads confirming this intron
    # exon_pre_exon1_conf
    idx1 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exon1[0]], seg_shape, order=order) + offset)[0]
    if len(idx1.shape) > 0 and idx1.shape[0] > 0:
        cov[0] += counts_edges[idx1[0], 1]
    # exon_pre_exon2_conf
    idx2 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon_pre[-1], seg_exon2[0]], seg_shape, order=order) + offset)[0]
    if len(idx2.shape) > 0 and idx2.shape[0] > 0:
        cov[1] += counts_edges[idx2[0], 1]
    # exon1_exon_aft_conf
    idx3 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon1[-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx3.shape) > 0 and idx3.shape[0] > 0:
        cov[0] += counts_edges[idx3[0], 1]
    # exon2_exon_aft_conf
    idx4 = sp.where(counts_edges[:, 0] == sp.ravel_multi_index([seg_exon2[-1], seg_exon_aft[0]], seg_shape, order=order) + offset)[0]
    if len(idx4.shape) > 0 and idx4.shape[0] > 0:
        cov[1] += counts_edges[idx4[0], 1]

    return cov
Developer: ratschlab | Project: spladder | Lines: 47 | Source: quantify.py
Example 16: _verify_eqrm_flags
def _verify_eqrm_flags(eqrm_flags):
    """
    Check that the values in eqrm_flags are consistent with how EQRM works.

    Args:
        eqrm_flags: A DictKeyAsAttributes instance.
    """
    if not allclose(eqrm_flags.atten_periods,
                    sort(eqrm_flags.atten_periods)):
        raise AttributeSyntaxError(
            "Syntax Error: Period values are not ascending")

    if eqrm_flags.save_hazard_map == True and eqrm_flags.is_scenario == True:
        raise AttributeSyntaxError(
            'Cannot save the hazard map for a scenario.')

    if eqrm_flags.atten_variability_method == 1 and \
            eqrm_flags.run_type == 'risk_csm':
        raise AttributeSyntaxError(
            'Cannot use spawning when doing a risk_csm simulation.')

    if eqrm_flags.amp_variability_method == 1:
        raise AttributeSyntaxError(
            'Cannot spawn on amplification.')

    if eqrm_flags.event_set_handler == 'load' and \
            eqrm_flags.event_set_load_dir is None:
        raise AttributeSyntaxError(
            'event_set_load_dir must be set if event_set_handler is load.')

    if eqrm_flags.event_set_handler == 'load' and \
            not os.path.exists(eqrm_flags.event_set_load_dir):
        raise AttributeSyntaxError(
            'event_set_load_dir %s must exist if event_set_handler is load.' %
            eqrm_flags.event_set_load_dir)

    # Only do these checks if different from output_dir
    # (output_dir gets created if not exists)
    if eqrm_flags.data_array_storage != eqrm_flags.output_dir and \
            not os.path.exists(eqrm_flags.data_array_storage):
        raise AttributeSyntaxError(
            'data_array_storage %s must exist and be accessible from %s' %
            (eqrm_flags.data_array_storage, socket.gethostname()))

    if eqrm_flags.fault_source_tag is None and \
            eqrm_flags.zone_source_tag is None:
        raise AttributeSyntaxError(
            'Either fault_source_tag or zone_source_tag must be set.')

    # Check to see if a parameter is defined that is incompatible with the
    # defined run_type
    # Note: _add_default_values should have already dealt with adding
    #       incompatible defaults
    for param in CONV_NEW:
        if not is_param_compatible(param, eqrm_flags):
            raise AttributeSyntaxError(
                "Attribute " + param['new_para'] +
                " not compatible with run_type=" + eqrm_flags['run_type'] +
                " - compatible run_type values are " + str(param['run_type']))
Developer: dynaryu | Project: eqrm | Lines: 59 | Source: parse_in_parameters.py
Example 17: calculateEV_montecarlo
def calculateEV_montecarlo(fFn, zRV, nDraws=10000):
    global g_montecarloDraws
    if (not (zRV, nDraws) in g_montecarloDraws):
        g_montecarloDraws[(zRV, nDraws)] = scipy.sort(zRV.rvs(size=nDraws))
    draws = g_montecarloDraws[(zRV, nDraws)]
    vals = map(fFn, draws)
    EV = scipy.mean(vals)
    return EV
Developer: Twizanex | Project: bellman | Lines: 8 | Source: myfuncs.py
Example 18: calculateEV_montecarlo2
def calculateEV_montecarlo2(grid, fArray, zRV, nDraws=10000):
    global g_montecarloDraws
    if (not (zRV, nDraws) in g_montecarloDraws):
        g_montecarloDraws[(zRV, nDraws)] = scipy.sort(zRV.rvs(size=nDraws))
    draws = g_montecarloDraws[(zRV, nDraws)]
    fn = linterp.LinInterp1D(grid, fArray)
    EV = fn.applySorted(draws) / nDraws
    return EV
Developer: Twizanex | Project: bellman | Lines: 8 | Source: myfuncs.py
Example 19: __init__
def __init__(self, N, vectors, coverage_ratio=0.2):
    """
    Performs exact nearest neighbour search on the data set.

    vectors can either be a numpy matrix with all the vectors
    as columns OR a python array containing the individual
    numpy vectors.
    """
    # We need a dict from vector string representation to index
    self.vector_dict = {}
    self.N = N
    self.coverage_ratio = coverage_ratio

    # Get numpy array representation of input
    self.vectors = numpy_array_from_list_or_numpy_array(vectors)

    # Build map from vector string representation to vector
    for index in range(self.vectors.shape[1]):
        self.vector_dict[self.__vector_to_string(
            self.vectors[:, index])] = index

    # Get transposed version of vector matrix, so that the rows
    # are the vectors (needed by cdist)
    vectors_t = numpy.transpose(self.vectors)

    # Determine the indices of query vectors used for comparison
    # with approximated search.
    query_count = numpy.floor(self.coverage_ratio *
                              self.vectors.shape[1])
    self.query_indices = []
    for k in range(int(query_count)):
        index = numpy.floor(k*(self.vectors.shape[1]/query_count))
        index = min(index, self.vectors.shape[1]-1)
        self.query_indices.append(int(index))

    print '\nStarting exact search (query set size=%d)...\n' % query_count

    # For each query vector get radius of closest N neighbours
    self.nearest_radius = {}
    self.exact_search_time_per_vector = 0.0

    for index in self.query_indices:
        v = vectors_t[index, :].reshape(1, self.vectors.shape[0])

        exact_search_start_time = time.time()
        D = cdist(v, vectors_t, 'euclidean')

        # Get radius of closest N neighbours
        self.nearest_radius[index] = scipy.sort(D)[0, N]

        # Save time needed for exact search
        exact_search_time = time.time() - exact_search_start_time
        self.exact_search_time_per_vector += exact_search_time

    print '\nDone with exact search...\n'

    # Normalize search time
    self.exact_search_time_per_vector /= float(len(self.query_indices))
Developer: gonzo66 | Project: thinking_web | Lines: 58 | Source: distanceratioexperiment.py
Example 20: filterBid
def filterBid(allids, sbids):
    '''
    Takes two id lists (allids = superset, sbids = subset) and
    returns the indices of allids that match sbids.
    '''
    if sp.unique(sbids).shape[0] != sbids.shape[0]:
        warnings.warn("Subset ids are not unique: Making it unique")
        sbids = sp.unique(sbids)
    if sp.unique(allids).shape[0] != allids.shape[0]:
        warnings.warn("Superset ids are not unique: Making it unique")
        allids = sp.unique(allids)
    if sp.sum(sp.sort(allids) == allids) != allids.shape[0]:
        warnings.warn("Superset ids are not sorted: Sorting it")
        allids = sp.sort(allids)
    if sp.sum(sp.sort(sbids) == sbids) != sbids.shape[0]:
        warnings.warn('Subset ids are not sorted: Sorting it')
        sbids = sp.sort(sbids)
    return sp.where(sp.in1d(allids, sbids))[0]
Developer: KjongLehmann | Project: m53 | Lines: 18 | Source: usefulTools.py
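A toy call showing what Example 20 returns (hypothetical id arrays, with numpy standing in for the sp alias):

import numpy as np

allids = np.array([3, 7, 11, 19, 25])
sbids = np.array([7, 25])
idx = np.where(np.in1d(allids, sbids))[0]
print(idx)          # [1 4] -- positions of the subset ids within allids
print(allids[idx])  # [ 7 25]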
Note: The scipy.sort examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use should follow each project's License. Please do not repost without permission.