
Python scipy.median Function Code Examples


This article collects typical usage examples of the scipy.median function in Python. If you have been wondering what exactly median does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help.



Twenty code examples of the median function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
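Before the collected examples, a minimal sketch of the call itself. In older SciPy releases the top-level namespace re-exported many NumPy routines, so scipy.median behaved like numpy.median; the alias has since been deprecated and removed, and new code should call numpy.median directly. The array values below are purely illustrative:

import numpy as np   # modern replacement for the old scipy.median alias

data = np.array([1.0, 3.0, 2.0, 100.0, 4.0])

# Median of a flat array: robust to the outlier (100.0), unlike the mean.
print(np.median(data))             # 3.0

# Median along an axis of a 2-D array.
grid = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
print(np.median(grid, axis=0))     # [2.5 3.5 4.5]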

Example 1: get_cluster_distribution

def get_cluster_distribution(g, method = 'average'):
	""" 
		The clustering coefficient distribution grouped by degree. Like a histogram, it shows each possible degree k
		and the average/median clustering coefficient of the nodes with degree k in graph g.

		Parameters:
		-----------
			g: NetworkX Graph
			method: str, ('average', 'median'), (default = 'average')
		Returns:
		--------
			xdata, ydata, a 2-tuple of array, (k, avg_cc(V_k)), where V_k are the nodes with degree k
	"""
	g = to_undirected(g)
	k = nx.clustering(g)
	d = g.degree()
	ck = defaultdict(list)
	for n in g.nodes_iter():
		ck[d[n]].append(k[n])
	xdata, ydata = list(), list()
	
	if method == 'average':
		for x, y in ifilter(lambda x: x[0] > 1 and average(x[1]) > 0, ck.iteritems()):
			xdata.append(x)
			ydata.append(average(y))
	elif method == 'median':
		for x, y in ifilter(lambda x: x[0] > 1 and median(x[1]) > 0, ck.iteritems()):
			xdata.append(x)
			ydata.append(median(y))
	else:
		raise NameError("method should be 'average' or 'median'")
	xdata = array(xdata)
	ydata = array(ydata)
	return(xdata, ydata)
Developer: kaeaura, Project: churn_prediction_proj, Lines: 34, Source: featureExtractor.py


Example 2: plot_collated

def plot_collated(r_set="truth", infl_set="varinfl-0.25", subplots=True, save=False):
    d = cl("%s/output-2013/sim3-results_r-%s_%s"%(DATA_DIR,r_set, infl_set))
    coverages = SP.array(range(20,200,20) + range(200,1001,100)) #range(200,500,50) + range(500,1001,100))
    if r_set == "truth": coverages = SP.array(range(20,200,20) + range(200,500,50) + range(500,1001,100))
    afs = map(lambda x:"%.2f"%x, [0.7,0.85,0.99])
    models = ['sQTL','Smooth','ML','MP']
    p = 0
    colors = 'bgry'
    if subplots: PL.figure(figsize=(14,10))
    for feature in 'FX':
        for af in afs:
            if subplots: PL.subplot(2,3,p+1)
            else: PL.figure()
            p += 1
            lines = []
            
            for i,model in enumerate(models):
                I = SP.where(d[af][model][feature].var(axis=0) > 1e-10)[0]
                err = d[af][model][feature][:,I].var(axis=1)**0.5
                lines.append(PL.plot(coverages + 2*i,SP.median(d[af][model][feature][:,I],axis=1), "-o", linewidth=3, markersize=9, color=colors[i])[0])
                PL.errorbar(coverages + 2*i, SP.median(d[af][model][feature][:,I],axis=1), yerr=err, fmt="-o", linewidth=1, markersize=9,color=colors[i])
            PL.xticks(coverages)
            #PL.xlim(min(coverages),max(coverages))
            PL.title("%s %s - %s"%(infl_set, feature, af))
            PL.xlim(15,220)

            if feature == "X": PL.ylim(0,8)
            if p == 1:  PL.legend(lines, models)
            if save: PL.savefig("/Users/leopold/doc/write/manuscripts/2011_X_sQTL/figures/figure2013-3_2%s.pdf"%("ABCDEF"[p-1:p]))
    PL.show()
Developer: PMBio, Project: sqtl, Lines: 30, Source: collate_infer.py


Example 3: PrintValues

def PrintValues( outfile, values,  options, prefix = "",titles = None):

    if options.flat or options.aggregate_column:

        if options.add_header:
            if prefix: outfile.write( "prefix\t" )
            
            if titles: outfile.write( "column\t" )
                
            print "\t".join( ("nval", "min", "max", "mean", "median", "stddev", "sum", "q1", "q3" ) )
        
        for x in range(len(values)):

            vals = values[x]

            if len(vals) == 0:

                if options.output_empty:
                    if titles: outfile.write( titles[x] + "\t" )
                    if prefix: outfile.write( prefix + "\t" )

                    outfile.write( "0" + "\tna" * 8  + "\n" )

                continue

            if titles: outfile.write( titles[x] + "\t" )
            if prefix: outfile.write( prefix + "\t" )

            vals.sort()
            if len(vals) > 4:
                q1 = options.value_format % vals[len(vals) // 4]
                q3 = options.value_format % vals[len(vals) * 3 // 4]
            else:
                q1 = options.value_format % vals[0]
                q3 = options.value_format % vals[-1]

            outfile.write( "\t".join( ( "%i" % len(vals),
                                        options.value_format % float(min(vals)),
                                        options.value_format % float(max(vals)),
                                        options.value_format % scipy.mean(vals),
                                        options.value_format % scipy.median(vals),
                                        options.value_format % scipy.std(vals),                                      
                                        options.value_format % reduce( lambda x, y: x+y, vals),
                                        q1, q3,
                                        )) + "\n")
            
    else:

        if titles:
            print "category\t%s" % string.join(titles,"\t")

        print "count\t%s"  % (string.join( map(lambda v: "%i" % len(v), values), "\t"))
        print "min\t%s"    % (string.join( map(lambda v: options.value_format % min(v), values), "\t"))
        print "max\t%s"    % (string.join( map(lambda v: options.value_format % max(v), values), "\t"))
        print "mean\t%s"   % (string.join( map(lambda v: options.value_format % scipy.mean(v), values), "\t"))
        print "median\t%s" % (string.join( map(lambda v: options.value_format % scipy.median(v), values), "\t"))
        print "stddev\t%s" % (string.join( map(lambda v: options.value_format % scipy.std(v), values), "\t"))
        print "sum\t%s"    % (string.join( map(lambda v: options.value_format % reduce( lambda x,y: x+y, v), values), "\t"))
        print "q1\t%s"     % (string.join( map(lambda v: options.value_format % scipy.stats.scoreatpercentile(v,per=25), values), "\t"))
        print "q3\t%s"     % (string.join( map(lambda v: options.value_format % scipy.stats.scoreatpercentile(v,per=75), values), "\t"))
Developer: siping, Project: cgat, Lines: 60, Source: data2stats.py


Example 4: subtract_overscan

def subtract_overscan(data,x,y):

   """This function finds the median values in each of the four overscan
      regions and subtracts them from the appropriate regions of the
      input data file.  It then converts the results back to electrons
      rather than ADU"""

   # Define bias region limits
   bx1 = slice(0,15,1)
   bx2 = slice(2065,2080,1)
   y1 = slice(0,1024,1)
   y2 = slice(1024,2048,1)

   # Define limits of regions associated with the four amps
   x1 = slice(16,1040)
   x2 = slice(1040,2064)

   # Define median values of overscan regions from appropriate data regions
   newdata = data.astype(scipy.float32)
   overscan = scipy.zeros((4,1))
   overscan[0] = scipy.median(newdata[y1,bx1].ravel())
   overscan[1] = scipy.median(newdata[y2,bx1].ravel())
   overscan[2] = scipy.median(newdata[y1,bx2].ravel())
   overscan[3] = scipy.median(newdata[y2,bx2].ravel())

   # Subtract overscan
   newdata[y1,x1] = newdata[y1,x1] - overscan[0]
   newdata[y2,x1] = newdata[y2,x1] - overscan[1]
   newdata[y1,x2] = newdata[y1,x2] - overscan[2]
   newdata[y2,x2] = newdata[y2,x2] - overscan[3]

   newdata = newdata[y,x]
   return newdata
Developer: cdfassnacht, Project: CodeCDF, Lines: 33, Source: forsim_redux.py
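A hypothetical call for the detector layout this function assumes (2048 rows by 2080 columns, with 16-pixel overscan strips at each edge). The stand-in frame and the slice bounds are illustrative only, and the old scipy.* numeric aliases are kept for consistency with the snippet:

import scipy

raw = scipy.ones((2048, 2080))   # stand-in for a real ADU frame read from a FITS file
# Trim the overscan columns from x and keep the full y range.
trimmed = subtract_overscan(raw, slice(16, 2064), slice(0, 2048))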


Example 5: WriteRadius

def WriteRadius(mali, identifiers, prefix="", gap_char="-"):
    """write percent identities in pairwise comparisons both for nucleotide acids and amino acids."""

    pides_na = []
    seq_aa = []

    for x in range(0, len(identifiers)):

        seq_aa.append(Genomics.TranslateDNA2Protein(mali[identifiers[x]]))

        for y in range(x + 1, len(identifiers)):
            if x == y:
                continue
            pides_na.append(MaliIO.getPercentIdentity(
                mali[identifiers[x]], mali[identifiers[y]], gap_char))

    pides_aa = []
    for x in range(0, len(identifiers) - 1):
        for y in range(x + 1, len(identifiers)):
            pides_aa.append(
                MaliIO.getPercentIdentity(seq_aa[x], seq_aa[y], gap_char))

    print "%s\tpide\t%i\t" % (prefix, len(pides_na)) +\
          string.join(map(lambda x: "%.2f" % x, (min(pides_na),
                                                 max(pides_na),
                                                 scipy.mean(pides_na),
                                                 scipy.median(pides_na),
                                                 numpy.std(pides_na))), "\t") + "\t" +\
          string.join(map(lambda x: "%.2f" % x, (min(pides_aa),
                                                 max(pides_aa),
                                                 scipy.mean(pides_aa),
                                                 scipy.median(pides_aa),
                                                 numpy.std(pides_aa))), "\t")
Developer: lesheng, Project: cgat, Lines: 33, Source: evaluate_mali.py


Example 6: lossTraces

def lossTraces(fwrap, aclass, dim, maxsteps, storesteps=None, x0=None,
               initNoise=0., minLoss=1e-10, algoparams={}):
    """ Compute a number of loss curves, for the provided settings,
    stored at specific storestep points. """
    if not storesteps:
        storesteps = range(maxsteps + 1)
    
    # initial points, potentially noisy
    if x0 is None:
        x0 = ones(dim) + randn(dim) * initNoise
    
    # tracking progress by callback
    paramtraces = {'index':-1}
    def storer(a):
        lastseen = paramtraces['index']
        for ts in [x for x in storesteps if x > lastseen and x <= a._num_updates]:
            paramtraces[ts] = a.bestParameters.copy()
        paramtraces['index'] = a._num_updates
        
    # initialization    
    algo = aclass(fwrap, x0, callback=storer, **algoparams)
    print algo, fwrap, dim, maxsteps,
    
    # store initial step   
    algo.callback(algo)
    algo.run(maxsteps)

    # process learning curve
    del paramtraces['index']
    paramtraces = array([x for _, x in sorted(paramtraces.items())])
    oloss = mean(fwrap.stochfun.expectedLoss(ones(100) * fwrap.stochfun.optimum))
    ls = abs(fwrap.stochfun.expectedLoss(ravel(paramtraces)) - oloss) + minLoss
    ls = reshape(ls, paramtraces.shape)
    print median(ls[-1])
    return ls
Developer: bitfort, Project: py-optim, Lines: 35, Source: experiments.py


Example 7: plotAllCombinations

def plotAllCombinations(aclasses, avariants,
                        fclasses, fvariants,
                        trials, maxsteps, maxbatchsize=10):
    fundic = {}    
    ploti = 1
    rows = sum([len(avariants[ac]) for ac in aclasses]) + len(aclasses) - 1
    cols = len(fvariants) * len(fclasses) + len(fclasses) - 1
    f_mid = int(median(range(len(fvariants))))
    for ac_id, aclass in enumerate(aclasses):
        a_mid = int(median(range(len(avariants[aclass]))))
        for as_id, aparams in enumerate(avariants[aclass]):
            if as_id == 0 and ac_id > 0:
                ploti += cols
            
            for fc_id, fclass in enumerate(fclasses):
                if fc_id not in fundic:
                    # shared samples across all uses of one function
                    fun = fclass()
                    fwrap = FunctionWrapper(trials, fun, record_samples=True)
                    fwrap.nextSamples(maxbatchsize * (maxsteps+10))
                    fundic[fc_id] = fwrap._seen
                data = fundic[fc_id]
                for fs_id, fsettings in enumerate(fvariants):
                    if fs_id == 0 and fc_id > 0:
                        ploti += 1
                    fun = fclass(**fsettings)
                    provider = DataFunctionWrapper(data, fun, shuffling=False)            
                    pylab.subplot(rows, cols, ploti); ploti += 1
                    plotHeatmap(provider, aclass, aparams, trials, maxsteps)
                    if ac_id == 0 and as_id == 0 and fs_id == f_mid:
                        pylab.title(fclass.__name__[5:])
                    if fs_id == 0 and as_id == a_mid:
                        pylab.ylabel(aclass.__name__[:6])
    pylab.subplots_adjust(left=0.1, bottom=0.01, right=0.99, top=0.9, wspace=0.05, hspace=0.05)        
Developer: Andres-Hernandez, Project: py-optim, Lines: 34, Source: test_comparisons.py


Example 8: plot_hist_compare

   def plot_hist_compare(self,which_case):
        plt.ylabel('Percentage of points')
        plt.xlabel('Percentage RMS relative error')
        
        def yto_percent(y, x):
            s = str(sp.around((y/(len(self.REL_ERR)*1.0)*100),2))
            if matplotlib.rcParams['text.usetex'] is True:
                return s + r'$\%$'
            else:
                return s + '%'     

        def xto_percent(y, x):
            s = str(y*100)
            if matplotlib.rcParams['text.usetex'] is True:
                return s + r'$\%$'
            else:
                    return s + '%' 
        
        thermo1, thermo2, = self.select[which_case]
        #Plot the SU2 error
        i=0;
        self.REL_ERR = 0;
        for v in self.variables[sp.where\
        ((self.variables!=thermo1) * (self.variables!=thermo2))]:
            i=i+1;
            self.REL_ERR = self.REL_ERR + \
            ((getattr(self.SU2[which_case],v)-getattr(self.RandomSamples,v))/\
            (getattr(self.RandomSamples,v)))**2;
        self.REL_ERR = sp.sqrt(self.REL_ERR)/i
        plt.hist(self.REL_ERR, bins=25, color='k', alpha=0.3, label='SU2')
        print 'Error max SU2', max(self.REL_ERR)
        setattr(self.SU2[which_case],"median_ERR",sp.median(self.REL_ERR));
        
        #Plot the SciPy error
        i =0;
        self.REL_ERR = 0;
        for v in self.variables[sp.where\
        ((self.variables!=thermo1) * (self.variables!=thermo2))]:
            i=i+1;
            self.REL_ERR = self.REL_ERR + \
            ((getattr(self.SciPy[which_case],v)-getattr(self.RandomSamples,v))/\
            (getattr(self.RandomSamples,v)))**2;
        self.REL_ERR = sp.sqrt(self.REL_ERR)/i
        
        plt.hist(self.REL_ERR, bins=25, color='c', alpha=0.5, label='SciPy')
        print 'Error max SciPy', max(self.REL_ERR)
        setattr(self.SciPy[which_case],"median_ERR",sp.median(self.REL_ERR));

        
        formatter_y = FuncFormatter(yto_percent)
        formatter_x = FuncFormatter(xto_percent)
        plt.gca().yaxis.set_major_formatter(formatter_y)
        plt.gca().xaxis.set_major_formatter(formatter_x)
        plt.grid(which='both')
        plt.legend()

       
        return       
Developer: MatejKosec, Project: LUTStandAlone, Lines: 58, Source: ConvergenceLibrary.py


Example 9: _printStuff

 def _printStuff(self):
     print self._num_updates,
     for n, a in self._print_quantities:
         #print n, type(a)
         if abs(median(a)) > 1e4 or abs(median(a)) < 1e-3:
             print n, median(a), '\t',
         else:
             print n, round(median(a), 4), '\t',
     print
Developer: Andres-Hernandez, Project: py-optim, Lines: 9, Source: vsgd.py


Example 10: calculate_varPrior

def calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp):

    logRes = sp.log(disp_raw[idx]) - sp.log(disp_fitted[idx])
    stdLogRes = sp.median(abs(logRes - sp.median(logRes))) * 1.4826

    varLogRes = stdLogRes ** 2
    varPrior = varLogRes - varLogDispSamp

    return max(varPrior, 0.1)
Developer: jiahsinhuang, Project: spladder, Lines: 9, Source: spladder_test.py


Example 11: __call__

 def __call__(self, x):
     res = median([self.f(x) for _ in range(int(self.resample_over))])
     if self.num_evals % self.batchsize == 0 and self.num_evals > 0:
         alt_res = median([self.f(x) for _ in range(int(self.resample_over))])
         self._adaptResampling(res, alt_res)
         res = 0.5 * res + 0.5 * alt_res
     self.recents[self.num_evals % self.batchsize] = res
     self.num_evals += 1
     return res
Developer: chenguodan, Project: pybrain, Lines: 9, Source: aptativeresampling.py


Example 12: MAD

def MAD(a, c=0.6745):
    """
    Median Absolute Deviation along first axis of an array:

    median(abs(a - median(a))) / c

    """

    a = N.asarray(a, N.float64)
    d = N.multiply.outer(median(a), N.ones(a.shape[1:]))
    return median(N.fabs(a - d) / c)
Developer: mbentz80, Project: jzigbeercp, Lines: 11, Source: scale.py
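The constant c = 0.6745 is, to four decimals, the 75th percentile of the standard normal distribution, which makes MAD/0.6745 a consistent estimator of the standard deviation for Gaussian data; its reciprocal, 1/0.6745 ≈ 1.4826, is the scale factor that appears in Examples 10, 13, 14 and 15. A quick numerical check of this relationship (NumPy only; the sample is synthetic):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=0.0, scale=2.0, size=100000)

# Median absolute deviation, rescaled to estimate sigma.
mad = np.median(np.abs(x - np.median(x)))
print(mad / 0.6745)   # approximately 2.0, the true sigma
print(x.std())        # approximately 2.0 as well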


Example 13: mad_clipping

def mad_clipping(input_data, sigma_clip_level, return_length=False):
    medval = median(input_data)
    sigma = 1.4826 * median(abs(medval - input_data))
    high_sigma_clip_limit = medval + sigma_clip_level * sigma
    low_sigma_clip_limit = medval - sigma_clip_level * sigma
    clipped_data = input_data[(input_data>(low_sigma_clip_limit)) &            \
                              (input_data<(high_sigma_clip_limit))]
    new_medval = median(clipped_data)
    new_sigma = 1.4826 * median(abs(medval - clipped_data))
    if return_length:
        return new_medval, new_sigma, len(clipped_data)
    else:
        return new_medval, new_sigma
Developer: ckleinastro, Project: DECam_coaddition, Lines: 13, Source: coadd_images.py


Example 14: mad_clipping

def mad_clipping(input_data, sigma_clip_level):
    medval = median(input_data)
    sigma = 1.48 * median(abs(medval - input_data))
    high_sigma_clip_limit = medval + sigma_clip_level * sigma
    low_sigma_clip_limit = medval - sigma_clip_level * sigma
    clipped_data = []
    for value in input_data:
        if (value > low_sigma_clip_limit) and (value < high_sigma_clip_limit):
            clipped_data.append(value)
    clipped_data_array = array(clipped_data)
    new_medval = median(clipped_data_array)
    new_sigma = 1.48 * median(abs(medval - clipped_data_array))
    return clipped_data_array, new_medval, new_sigma
Developer: ckleinastro, Project: aperture_photometry_scripts, Lines: 13, Source: photometry.py


Example 15: __amp_detect

    def __amp_detect(self, x):
        
        ref = np.floor(self.min_ref_per*self.sr/1000.0)
        
        # HIGH-PASS FILTER OF THE DATA
        (b,a) = signal.ellip(2, 0.1, 40, [self.fmin_detect*2.0/self.sr,self.fmax_detect*2.0/self.sr], btype='bandpass', analog=0, output='ba')
        xf_detect = signal.filtfilt(b, a, x)
        (b,a) = signal.ellip(2, 0.1, 40, [self.fmin_sort*2.0/self.sr,self.fmax_sort*2.0/self.sr], btype='bandpass', analog=0, output='ba')
        xf = signal.filtfilt(b, a, x)
        
        
        noise_std_detect = scipy.median(np.abs(xf_detect))/0.6745;
        noise_std_sorted = scipy.median(np.abs(xf))/0.6745;
       
        thr = self.stdmin * noise_std_detect        #thr for detection is based on detected settings.
        thrmax = self.stdmax * noise_std_sorted     #thrmax for artifact removal is based on sorted settings.
        
        # LOCATE SPIKE TIMES
        nspk = 0;
        xaux = np.argwhere(xf_detect[self.w_pre+1:len(xf_detect)-self.w_post-1-1] > thr) + self.w_pre + 1
        xaux = np.resize(xaux,len(xaux))
        xaux0 = 0;
        index = []
        for i in range(len(xaux)):
            if xaux[i] >= (xaux0 + ref):
            # after finding a peak, resume the search ref samples past the last accepted peak (xaux0)
                iaux = xf[xaux[i]:xaux[i]+np.floor(ref/2.0)].argmax(0)    # introduces alignment
                nspk = nspk + 1
                index.append(iaux + xaux[i])
                xaux0 = index[nspk-1];
        
        # SPIKE STORING (with or without interpolation)
        ls = self.w_pre + self.w_post
        spikes = np.zeros([nspk,ls+4])
        xf = np.concatenate((xf,np.zeros(self.w_post)),axis=0)
        
        for i in range(nspk):                          # Eliminates artifacts
            if np.max( np.abs( xf[index[i]-self.w_pre:index[i]+self.w_post] )) < thrmax :
                spikes[i,:] = xf[index[i]-self.w_pre-1:index[i]+self.w_post+3]
     
        aux = np.argwhere(spikes[:,self.w_pre] == 0)       #erases indexes that were artifacts
        if len(aux) != 0:
            aux = aux.reshape((1,len(aux)))[0]
            spikes = np.delete(spikes, aux, axis = 0)
            index = np.delete(index,aux)
 
        if self.interpolation == 'y':
            # Does interpolation
            spikes = self.__int_spikes(spikes)

        return spikes, thr, index
Developer: sergio2pi, Project: NeuroDB, Lines: 51, Source: spike.py


Example 16: analysis

def analysis(records, analysis_function):
    """Read in the results of one of the tools and calculate certain statistics.
       analysis_function is a function for reading in the results (e.g. cleangingTools.parseScopaInfo).
       """
    trim = []
    left_trim = []
    right_trim = []

    tp, fp, tn, fn = [0]*4   # true positive, false positive, ...
    for i,seq_record in enumerate(records):
        id, present, actual_start, actual_end, found, predicted_start, predicted_end = analysis_function(seq_record)[-1]

        if present:
            if found:
                tp = tp + 1
                left_trim.append(int(actual_start) - int(predicted_start))
                right_trim.append(int(predicted_end) - int(actual_end))
                trim.append(left_trim[-1] + right_trim[-1])
            else:
                fn = fn + 1
        else:
            if found:
                fp = fp + 1
            else:
                tn = tn + 1

    sensitivity = float(tp) / (tp + fn) if tp + fn > 0 else -1
    specificity = float(tn) / (tn + fp) if tn + fp > 0 else -1
    if len(trim) > 0:
        pct_correct = len(filter(lambda x: x==0, trim)) / float(len(trim))
        avg_trim = scipy.mean(trim)
        median_trim = scipy.median(trim)
        SoS_trim = scipy.mean(map(lambda x : x*x, trim))
        avg_left = scipy.mean(left_trim)
        median_left = scipy.median(left_trim)
        avg_right = scipy.mean(right_trim)
        median_right = scipy.median(right_trim)

        overArr = filter(lambda x : x > 0, trim)
        pct_over = len(overArr) / float(len(trim))
        avg_over = scipy.mean(overArr) if len(overArr) > 0 else -99999
        median_over = scipy.median(overArr) if len(overArr) > 0 else -99999

        underArr = filter(lambda x : x < 0, trim)
        pct_under = len(underArr) / float(len(trim))
        avg_under = scipy.mean(underArr) if len(underArr) > 0 else -99999
        median_under = scipy.median(underArr) if len(underArr) > 0 else -99999
    else:
        return [sensitivity, specificity] + [9999]*10

    return [sensitivity, specificity, pct_correct, avg_trim, median_trim, avg_left, median_left, avg_right, median_right, SoS_trim, pct_over, avg_over, median_over, pct_under, avg_under, median_under]
Developer: mortonjt, Project: SCOPE, Lines: 51, Source: karro_sim2.py


Example 17: print_all_stats

def print_all_stats(ctx, series):
    ftime = get_ftime(series)
    start = 0 
    end = ctx.interval
    print('start-time, samples, min, avg, median, 90%, 95%, 99%, max')
    while (start < ftime):  # for each time interval
        end = ftime if ftime < end else end
        sample_arrays = [ s.get_samples(start, end) for s in series ]
        samplevalue_arrays = []
        for sample_array in sample_arrays:
            samplevalue_arrays.append( 
                [ sample.value for sample in sample_array ] )
        #print('samplevalue_arrays len: %d' % len(samplevalue_arrays))
        #print('samplevalue_arrays elements len: ' + \
               #str(map( lambda l: len(l), samplevalue_arrays)))
        # collapse list of lists of sample values into list of sample values
        samplevalues = reduce( array_collapser, samplevalue_arrays, [] )
        #print('samplevalues: ' + str(sorted(samplevalues)))
        # compute all stats and print them
        myarray = scipy.fromiter(samplevalues, float)
        mymin = scipy.amin(myarray)
        myavg = scipy.average(myarray)
        mymedian = scipy.median(myarray)
        my90th = scipy.percentile(myarray, 90)
        my95th = scipy.percentile(myarray, 95)
        my99th = scipy.percentile(myarray, 99)
        mymax = scipy.amax(myarray)
        print( '%f, %d, %f, %f, %f, %f, %f, %f, %f' % (
            start, len(samplevalues), 
            mymin, myavg, mymedian, my90th, my95th, my99th, mymax))

        # advance to next interval
        start += ctx.interval
        end += ctx.interval
Developer: huyanhua, Project: fio, Lines: 34, Source: fiologparser.py
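scipy.fromiter, scipy.amin, scipy.average, scipy.median, scipy.percentile and scipy.amax are all old top-level NumPy aliases, so the per-interval statistics line above can be reproduced with plain NumPy roughly as follows (the sample values and the stand-in start time are made up):

import numpy as np

samplevalues = [3.2, 1.7, 9.9, 4.4, 2.8]   # illustrative latency samples
a = np.fromiter(samplevalues, float)
print('%f, %d, %f, %f, %f, %f, %f, %f, %f' % (
    0.0, len(samplevalues),                 # 0.0 stands in for the interval start
    a.min(), a.mean(), np.median(a),
    np.percentile(a, 90), np.percentile(a, 95),
    np.percentile(a, 99), a.max()))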


Example 18: domain_length

    def domain_length(self,face_1,face_2):
        r'''
        Calculate the distance between two faces of the network

        Parameters
        ----------
        face_1 and face_2 : array_like
            Lists of pores belonging to opposite faces of the network

        Returns
        -------
        The length of the domain in the specified direction

        Notes
        -----
        - Does not yet check if input faces are perpendicular to each other
        '''
        #Ensure given points are coplanar before proceeding
        if misc.iscoplanar(self['pore.coords'][face_1]) and misc.iscoplanar(self['pore.coords'][face_2]):
            #Find distance between given faces
            x = self['pore.coords'][face_1]
            y = self['pore.coords'][face_2]
            Ds = misc.dist(x,y)
            L = sp.median(sp.amin(Ds,axis=0))
        else:
            logger.warning('The supplied pores are not coplanar. Length will be approximate.')
            f1 = self['pore.coords'][face_1]
            f2 = self['pore.coords'][face_2]
            distavg = [0,0,0]
            distavg[0] = sp.absolute(sp.average(f1[:,0]) - sp.average(f2[:,0]))
            distavg[1] = sp.absolute(sp.average(f1[:,1]) - sp.average(f2[:,1]))
            distavg[2] = sp.absolute(sp.average(f1[:,2]) - sp.average(f2[:,2]))
            L = max(distavg)
        return L
Developer: Maggie1988, Project: OpenPNM, Lines: 34, Source: __MatFile__.py


Example 19: updateProperties

    def updateProperties( self, values):
        """update properties.

        If values is a vector of strings, each entry will be converted
        to float. Entries that cannot be converted are ignored.
        """
        values = [x for x in values if x != None ]

        if len(values) == 0:
            raise ValueError( "no data for statistics" )

        ## convert
        self.mNErrors = 0
        if type(values[0]) not in (types.IntType, types.FloatType):
            n = []
            for x in values:
                try:
                    n.append( float(x) )
                except ValueError:
                    self.mNErrors += 1
        else:
            n = values

        ## TODO: switch to a selection algorithm to avoid the full sort.
        n.sort()
        self.mQ1 = n[len(n) / 4]
        self.mQ3 = n[len(n) * 3 / 4]
        
        self.mCounts = len(n)
        self.mMin = min(n)
        self.mMax = max(n)
        self.mMean = scipy.mean( n )
        self.mMedian = scipy.median( n )
        self.mSampleStd = scipy.std( n )
        self.mSum = reduce( lambda x, y: x+y, n )
Developer: Rfam, Project: rfam-website, Lines: 35, Source: Stats.py


Example 20: avgFoundAfter

def avgFoundAfter(decreasingTargetValues, listsOfActualValues, batchSize=1, useMedian=False):
    """ Determine the average number of steps to reach a certain value (for the first time),
    given a list of value sequences.
    If a value is not always encountered, the length of the longest sequence is used.
    Returns an array. """
    from scipy import sum

    numLists = len(listsOfActualValues)
    longest = max(map(len, listsOfActualValues))
    # gather a list of indices of first encounters
    res = [[0] for _ in range(numLists)]
    for tval in decreasingTargetValues:
        for li, l in enumerate(listsOfActualValues):
            lres = res[li]
            found = False
            for i in range(lres[-1], len(l)):
                if l[i] <= tval:
                    lres.append(i)
                    found = True
                    break
            if not found:
                lres.append(longest)
    tmp = array(res)
    if useMedian:
        resx = median(tmp, axis=0)[1:]
    else:
        resx = sum(tmp, axis=0)[1:] / float(numLists)
    return resx * batchSize
Developer: firestrand, Project: pybrain-gpu, Lines: 28, Source: utilities.py



Note: The scipy.median examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

