
Python pylab.median function code examples


This article collects typical usage examples of the pylab.median function in Python. If you have been wondering exactly what the median function does, how to call it, or what real uses of it look like, the curated code examples below may help.



Twenty code examples of the median function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
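Before the project examples, here is a minimal standalone sketch (not taken from any of the projects listed below) of what pylab.median computes. pylab simply re-exports numpy.median, so the optional axis argument controls whether the median is taken over all values, per column, or per row:

import pylab as pl

# a small 2 x 3 array used only for illustration
data = pl.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 60.0]])

print(pl.median(data))          # median of all six values -> 3.5
print(pl.median(data, axis=0))  # column-wise medians      -> [2.5, 3.5, 31.5]
print(pl.median(data, axis=1))  # row-wise medians         -> [2.0, 5.0]

Because the median is insensitive to outliers (note the 60.0 above), many of the examples below use pl.median rather than pl.mean to summarize noisy errors, fluxes, or run times.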

Example 1: add_to_results

def add_to_results(model, name):
    df = getattr(model, name)
    model.results['param'].append(name)
    model.results['bias'].append(df['abs_err'].mean())
    model.results['mae'].append((pl.median(pl.absolute(df['abs_err'].dropna()))))
    model.results['mare'].append(pl.median(pl.absolute(df['rel_err'].dropna())))
    model.results['pc'].append(df['covered?'].mean())
Developer: aflaxman, Project: gbd, Lines of code: 7, Source: validate_covariates.py


Example 2: trace_fibers

def trace_fibers(flatim, params):

    trace_im1 = pf.getdata(flatim)*0

    imdat, imhead = pf.getdata(flatim), pf.getheader(flatim)

    ###  separating fibers in first column and assigning fibers ids
    print '\n\tSEARCHING FOR FIBERS BETWEEN x=0 & y=[' +str(params.LO_BUFFER) +':'+str(len(imdat[:,0])-params.HI_BUFFER)+']'
    fiber_peaks_pix = pl.find( imdat[:,0] > pl.median(imdat[:,0]) )
    fiber_peaks_pix,fiber_peaks_flx = sep_peaks( fiber_peaks_pix, imdat[:,0] )
    if pl.median(fiber_peaks_pix[0])<=params.LO_BUFFER:
        fiber_peaks_pix = fiber_peaks_pix[1:]
        fiber_peaks_flx = fiber_peaks_flx[1:]
    if (len(imdat[:,0])-pl.median(fiber_peaks_pix[-1]))<=params.HI_BUFFER:
        fiber_peaks_pix = fiber_peaks_pix[:-1]
        fiber_peaks_flx = fiber_peaks_flx[:-1]
    print '\t  --> FOUND ', len(fiber_peaks_pix), ' FIBER PEAKS'

    ###  creating array for fibers
    fibers0 = []
    id_cnt = 1
    for f in range(len(fiber_peaks_pix)):
        while params.FIBERS_EXCLUDE.tolist().count(id_cnt)==1: id_cnt+=1

        fibx,fiby = fiber_peaks_pix[f],fiber_peaks_flx[f]
        peakx = fibx[ fiby.tolist().index(max(fiby)) ]
        yrange = pl.arange(  peakx-params.FIBER_WIDTH/2  ,  peakx+params.FIBER_WIDTH/2+1  )

        fibers0.append( fiber(id_cnt, 0,     yrange     ))
        id_cnt+=1

##  TRACING FIBERS ALONG X-AXIS INDIVIDUALLY
    for fib in fibers0:
        for x in range(1,len(imdat)):
##  FIRST, TAKE THE FLUXES IN THE PIXELS AT x
##  THAT ENCOMPASSED THE PEAK AT x-1
            fluxes = imdat[ fib.xy[-1][1] , x ]
##  NEXT, FIND THE VERTICAL SHIFT TO CENTER ON
##  THE PEAK FLUX AT x. MAXIMUM SHIFT IS DETERMINED
##  FROM THE FIBER_WIDTH PARAMETER.
            deltay = range( -len(fluxes)/2+1 , len(fluxes)/2+1 )[ fluxes.tolist().index(max(fluxes)) ]

##  RECORD THE NEW Y-PIXELS THAT ARE CENTERED ON
##  THE FIBER AT x.
            fib.xy.append( [ x, fib.xy[-1][1]+deltay ] )

##  FLAG PIXELS FOR FIBER IN FIRST-PASS TRACE IMAGE
            trace_im1[fib.xy[-1][1],x] = fib.id



    trc0 = 'trace_pass1.fits'
    print '\n\tWRITING INITIAL TRACING TO ', trc0
    try: pf.writeto(trc0, trace_im1, header=imhead)
    except:
        os.remove(trc0)
        pf.writeto(trc0, trace_im1, header=imhead)
        
    return fibers0
Developer: boada, Project: vp_art, Lines of code: 59, Source: vp_art_trace_fibers.py


Example 3: combine_output

def combine_output(J, T, model, dir, reps, save=False):
    """
    Combine output on absolute error, relative error, csmf_accuracy, and coverage from
    multiple runs of validate_once. Either saves the output to disk, or returns arrays
    for each.
    """

    cause = pl.zeros(J*T, dtype='f').view(pl.recarray)
    time = pl.zeros(J*T, dtype='f').view(pl.recarray)
    abs_err = pl.zeros(J*T, dtype='f').view(pl.recarray) 
    rel_err = pl.zeros(J*T, dtype='f').view(pl.recarray)
    coverage = pl.zeros(J*T, dtype='f').view(pl.recarray)
    csmf_accuracy = pl.zeros(J*T, dtype='f').view(pl.recarray)

    for i in range(reps): 
        metrics = pl.csv2rec('%s/metrics_%s_%i.csv' % (dir, model, i))
        cause = pl.vstack((cause, metrics.cause))
        time = pl.vstack((time, metrics.time))
        abs_err = pl.vstack((abs_err, metrics.abs_err))
        rel_err = pl.vstack((rel_err, metrics.rel_err))
        coverage = pl.vstack((coverage, metrics.coverage))
        csmf_accuracy = pl.vstack((csmf_accuracy, metrics.csmf_accuracy))

    cause = cause[1:,]
    time = time[1:,]    
    abs_err = abs_err[1:,]
    rel_err = rel_err[1:,]
    coverage = coverage[1:,]
    csmf_accuracy = csmf_accuracy[1:,]

    mean_abs_err = abs_err.mean(0)
    median_abs_err =  pl.median(abs_err, 0)
    mean_rel_err = rel_err.mean(0)
    median_rel_err = pl.median(rel_err, 0)
    mean_csmf_accuracy = csmf_accuracy.mean(0)
    median_csmf_accuracy = pl.median(csmf_accuracy, 0)
    mean_coverage_bycause = coverage.mean(0)
    mean_coverage = coverage.reshape(reps, T, J).mean(0).mean(1)
    percent_total_coverage = (coverage.reshape(reps, T, J).sum(2)==3).mean(0)
    mean_coverage = pl.array([[i for j in range(J)] for i in mean_coverage]).ravel()
    percent_total_coverage = pl.array([[i for j in range(J)] for i in percent_total_coverage]).ravel()

    models = pl.array([[model for j in range(J)] for i in range(T)]).ravel()
    true_cf = metrics.true_cf
    true_std = metrics.true_std
    std_bias = metrics.std_bias

    all = pl.np.core.records.fromarrays([models, cause[0], time[0], true_cf, true_std, std_bias, mean_abs_err, median_abs_err, mean_rel_err, median_rel_err, 
                                         mean_csmf_accuracy, median_csmf_accuracy, mean_coverage_bycause, mean_coverage, percent_total_coverage], 
                                        names=['model', 'cause', 'time', 'true_cf', 'true_std', 'std_bias', 'mean_abs_err', 'median_abs_err', 
                                         'mean_rel_err', 'median_rel_err', 'mean_csmf_accuracy', 'median_csmf_accuracy', 
                                         'mean_covearge_bycause', 'mean_coverage', 'percent_total_coverage'])   
    
    if save: 
        pl.rec2csv(all, '%s/%s_summary.csv' % (dir, model)) 
    else: 
        return all
Developer: aflaxman, Project: pymc-cod-correct, Lines of code: 57, Source: validate_models.py


Example 4: loadFile

def loadFile(objectFileName):
    oimg = pyfits.open(objectFileName)

    # Load the IFU data -- Row-stacked spectra
    odata = oimg[1].data
    oError = oimg[2].data
    odata_dim = odata.shape
    wcs = astWCS.WCS(objectFileName, extensionName=1)
    owavelengthStartEnd = wcs.getImageMinMaxWCSCoords()[0:2]
    fiberNumber = wcs.getImageMinMaxWCSCoords()[2:4]
    owavelengthStep = oimg[1].header['CDELT1']

    owavelengthRange = [owavelengthStartEnd[0] + i * owavelengthStep
                        for i in range(odata_dim[1])]

    # Check to make sure we got it right
    if not owavelengthRange[-1] == owavelengthStartEnd[-1]:
        print 'The ending wavelengths do not match... Exiting'
        sys.exit(1)
    else:
        # make median sky
        specs = pyl.array([flux for flux in odata])
        skySpec = pyl.median(specs, axis=0)

    RSS = []
    for i in range(int(fiberNumber[1])):
        #oflux = odata[i] - oskyflux
        oflux = odata[i] - skySpec
        oflux[pyl.isnan(oflux)] = 0.0
        oErrorFlux = oError[i]
        #oflux = odata[i]

        # Mask out extreme values in spectrum
        # Just because edges dodgy in efosc
        med = pyl.median(oflux)
        oflux[pyl.greater(abs(oflux), 10.0 * med)] = 0.0001

        objSED = astSED.SED(wavelength=owavelengthRange, flux=oflux)
        #skySED = astSED.SED(wavelength=owavelengthRange, flux=oskyflux)
        skySED = astSED.SED(wavelength=owavelengthRange, flux=skySpec)
        errSED = astSED.SED(wavelength=owavelengthRange, flux=oErrorFlux)

        #  make it > 0 everywhere
        objSED.flux = objSED.flux - objSED.flux.min()
        objSED.flux = objSED.flux / objSED.flux.max()
        errSED.flux = errSED.flux - errSED.flux.min()
        errSED.flux = errSED.flux / errSED.flux.max()
        skySED.flux = skySED.flux - skySED.flux.min()
        skySED.flux = skySED.flux / skySED.flux.max()

        RSS.append({'object': objSED, 'sky': skySED, 'error': errSED})

    return RSS
Developer: boada, Project: vpCluster, Lines of code: 53, Source: plot_spectra.py


Example 5: one_ci

  def one_ci(v, ci, bootstraps):
    v = pylab.array(v)
    v = pylab.ma.masked_array(v,pylab.isnan(v)).compressed()
    if v.size == 0:
      return pylab.nan, 0, 0 #Nothing to compute

    r = pylab.randint(v.size, size=(v.size, bootstraps))
    booted_samp = pylab.array([pylab.median(v[r[:,n]]) for n in xrange(bootstraps)])
    booted_samp.sort()

    med = pylab.median(booted_samp)
    idx_lo = int(bootstraps * ci/2.0)
    idx_hi = int(bootstraps * (1.0-ci/2))

    return med, med-booted_samp[idx_lo], booted_samp[idx_hi]-med
Developer: kghose, Project: neurapy, Lines of code: 15, Source: stats.py


Example 6: remove_discontinuity

def remove_discontinuity(value, xgap=10, ygap=200):
    """
    Remove discontinuity (sudden jump) in a series of values.
    Written by Denis, developed for LLC Fringe Counts data.
    value : list or numpy.array
    xgap  : "width" of index of the list/array to adjust steps
    ygap  : threshold value to detect discontinuity
    """
    difflist = pl.diff(value)
    discont_index = pl.find(abs(difflist) > ygap)

    if len(discont_index) == 0:
        return value
    else:
        discont_index = pl.append(discont_index, len(difflist))

    # find indices at discontinuities
    discont = {"start": [], "end": []}
    qstart = discont_index[0]
    for i in range(len(discont_index) - 1):
        if discont_index[i + 1] - discont_index[i] > xgap:
            qend = discont_index[i]
            discont["start"].append(qstart - xgap)
            discont["end"].append(qend + xgap)
            qstart = discont_index[i + 1]

    # add offsets at discontinuities
    result = pl.array(value)
    for i in range(len(discont["end"])):
        result[0 : discont["start"][i]] += result[discont["end"][i]] - result[discont["start"][i]]

    # remove the median
    result = result - pl.median(result)
    return result
Developer: itoledoc, Project: coneHighFreq, Lines of code: 34, Source: tmUtils.py


Example 7: makePlots

 def makePlots(self, ax, x, fNum, fColor, fMarker, feedstock):
     
     x.getQuery()
     
     if x.queryString.startswith('No'):
         pass    
     
     elif x.queryString.startswith('FR'):
         data = [1,1]
         ax.plot([fNum]*2,[1,1],fColor,marker=fMarker,markersize=2)
         
     else:
         cur = self.conn.cursor()
         print x.queryString
         cur.execute(x.queryString)
         #[all data]
         data = cur.fetchall()
         cur.close()
         medVal = median(data)
         maxVal = max(data)
         minVal = min(data)
         
         ax.plot([fNum],medVal,fColor,marker='_', markersize=7)
 
         #Plot the max/min values
         ax.plot([fNum]*2,[maxVal, minVal],fColor,marker=fMarker, markersize=2)    
         
         self.writeResults(feedstock, str(maxVal[0]), str(medVal), str(minVal[0]))
Developer: NoahFisher, Project: NREL-AQ-Full, Lines of code: 28, Source: ContributionFigure.py


Example 8: flow_rate_hist

def flow_rate_hist(sheets):
    ant_rates = []
    weights = []
    for sheet in sheets:
        ants, seconds, weight = flow_rate(sheet)
        ant_rate = seconds / ants
        #ant_rate = ants / seconds
        ant_rates.append(ant_rate)
        weights.append(float(weight))
        #weights.append(seconds)

    weights = pylab.array(weights)
    weights /= sum(weights)

    #print "ants per second"
    print "seconds per ant"
    mu = pylab.mean(ant_rates)
    print "mean", pylab.mean(ant_rates)
    wmean = pylab.average(ant_rates, weights=weights)
    print "weighted mean", wmean
    print "median", pylab.median(ant_rates)
    print "std", pylab.std(ant_rates, ddof=1)
    ant_rates = pylab.array(ant_rates)
    werror = (ant_rates - mu) * weights
    print "weighted std", ((sum(werror ** 2))) ** 0.5
    print "weighted std 2", (pylab.average((ant_rates - mu)**2, weights=weights)) ** 0.5
    pylab.figure()
    pylab.hist(ant_rates)
    pylab.savefig('ant_flow_rates.pdf', format='pdf')
    pylab.close()
Developer: arjunc12, Project: Ants, Lines of code: 30, Source: flow_rate.py


Example 9: scatter_times

def scatter_times(name, sheets):
    means = []
    medians = []
    delays = []
    mean_points = []
    med_points = []
    for sheet, delay in sheets:
        delays.append(delay)
        times = get_times(sheet)
        mean = pylab.mean(times)
        median = pylab.median(times)
        means.append(mean)
        medians.append(median)
        mean_points.append((mean, sheet))
        med_points.append((median, sheet)) 
    
    print "----------mean points-----------"    
    for mean, sheet in sorted(mean_points):
        print mean, sheet
    print "----------median points-----------"
    for median, sheet in sorted(med_points):
        print median, sheet
          
    pylab.scatter(delays, means, color='r')
    pylab.scatter(delays, medians, color='b')
    print "show"
    pylab.show()
Developer: arjunc12, Project: Ants, Lines of code: 27, Source: cluster_intervals.py


Example 10: pitch_estimate

def pitch_estimate(dw):
    step = 8
    wsize = 2048
    wfun = pl.ones
    wa = 3
    lo, hi = 50, 700
    hist_params = dict(bins=800, lw=0, range=[lo,hi], rwidth=1.0,
        normed=True, log=True)

    subplots = wplot.Subplots(6, 1,
        yticks=[0,.1,.25,.5,1],
        xlim=(120,240),
        autoscalex_on=False)

    for wfun in [pl.hanning, pl.hamming, pl.blackman, pl.bartlett, pl.ones]:
        cc = chunker.Chunker(dw, window=wfun(wsize), step=step)
        acs = [cc.ac for c in cc.chunks()]
        pp = [chunker.find_peak(a, lo, hi, wa=wa) for a in acs]
        mm = pl.median(pp)
        subplots(
            title='window: %s(%d) step=%s range=%s wa=%d' % (wfun.func_name, wsize, step, [lo,hi], wa),
            xticks=[mm]+range(lo,hi+50,50))
        subplots.next()
        freq, bins, patches = pl.hist(pp, **hist_params)

    print 'Ok!'
Developer: antiface, Project: dsp-2, Lines of code: 26, Source: epoch-ZFR.py


Example 11: mare

def mare(model, data_type):
    try:
        pred = model.vars[data_type]['p_pred'].trace().mean(0)
    except:
        pred = 0    
    obs = model.get_data(data_type)['value']
    mare = pl.median((abs(pred - obs)/obs)*100)
    return mare
Developer: peterhm, Project: gbd, Lines of code: 8, Source: dmco_methods.py


Example 12: plot_histogram

def plot_histogram(histogram, html_writer, title='', max_pathway_length=8, xmin=None, xlim=20, error_bars=True, min_to_show=20, legend_loc='upper left'):
    fig = pylab.figure()

    pylab.hold(True)

    reps = 1000
    
    y_offset = 0
    offset_step = 0.007
    colors = {1:'r', 2:'orange', 3:'green', 4:'cyan', 5:'blue', 'Rest':'violet', 'Not first':'k--', 'No known regulation':'grey', 'Activated':'green', 'Inhibited':'r', 'Mixed regulation':'blue'}
    for key, value in histogram.iteritems():
        if len(value) >= min_to_show:
            m = stats.cmedian(value)
            
            sample_std = None
            
            if error_bars:
                sample_vals = []
                i = 0
                while i < reps:
                    samples = []
                    while len(samples) < len(value):
                        samples.append(random.choice(value))
                    sample_vals.append(pylab.median(samples))
                    i += 1
                
                sample_std = pylab.std(sample_vals)
                        
            plotting.cdf(value, label='%s (med=%.1f, N=%d)' % \
                (key, m, len(value)),
                style=colors.get(key, 'grey'), std=sample_std, y_offset=y_offset)
            y_offset += offset_step
            

    xmin = -1 * xlim if xmin == None else xmin
    pylab.xlim(xmin, xlim)
    pylab.xlabel('Irreversibility')
    #pylab.xlabel('deltaG')
    pylab.ylabel('Cumulative distribution')
    legendfont = matplotlib.font_manager.FontProperties(size=11)
    pylab.legend(loc=legend_loc, prop=legendfont)
    pylab.title(title)
    pylab.hold(False)
    
    if 'Not first' in histogram:
        print '%s, first vs. non-first ranksum test: ' % title + '(%f, %f)' % stats.ranksums(histogram[1], histogram['Not first'])
    
    if 'Inhibited' in histogram:
        print '%s, inhibited vs. non-regulated ranksum test: ' % title + '(%f, %f)' % stats.ranksums(histogram['Inhibited'], histogram['No known regulation'])
         
    
    #for k1, h1 in histogram.iteritems():
    #    for k2, h2 in histogram.iteritems():
    #        print k1, k2, stats.ranksums(h1, h2)
    
    return fig
Developer: issfangks, Project: milo-lab, Lines of code: 56, Source: reversibility.py


Example 13: get_stats

def get_stats(typeid,date):
    global db
    cur = db.cursor()
    cur.execute("SELECT AVG(price),SUM(volremain),SUM(volenter) - SUM(volremain),bid FROM archive_market WHERE typeid = %s AND (reportedtime) :: date = %s GROUP BY orderid,bid", [typeid, date])
    a = cur.fetchall()
    avg_b = array(zeros(len(a)),dtype=float)
    vol_b = array(zeros(len(a)),dtype=float)
    move_b = array(zeros(len(a)),dtype=float)
    avg_s = array(zeros(len(a)),dtype=float)
    vol_s = array(zeros(len(a)),dtype=float)
    move_s = array(zeros(len(a)),dtype=float)

    x_s = 0
    x_b = 0
    for r in a:
        if r[3]:
            avg_b[x_b] = r[0]
            vol_b[x_b] = r[1]
            move_b[x_b] = r[2]
            x_b += 1
        else:
            avg_s[x_s] = r[0]
            vol_s[x_s] = r[1]
            move_s[x_s] = r[2]
            x_s += 1
    avg_b.resize(x_b)
    avg_s.resize(x_s)
    vol_b.resize(x_b)
    vol_s.resize(x_s)
    move_b.resize(x_b)
    move_s.resize(x_s)
    b = (None,None,None)
    s = (None,None,None)
    try:
        b = (pylab.median(avg_b), pylab.mean(vol_b), pylab.mean(move_b))
        s = (pylab.median(avg_s), pylab.mean(vol_s), pylab.mean(move_s))
    except:
        return (b,b,b)

    ret = ( ((b[0]+s[0])/2, (b[1]+s[1])/2, (b[2]+s[2])/2), b, s)
    print ret
    return ret
Developer: PeterGottesman, Project: eve-central.com, Lines of code: 43, Source: market_stat.py


Example 14: simulateGame

def simulateGame(numGames, tilePairs, gameType):
    runTimeList = []
    for i in range(numGames):
        gameTime = gameType(tilePairs)
        runTimeList.append(gameTime)
    medTime = pylab.median(runTimeList)
    meanTime = pylab.mean(runTimeList)
    pylab.hist(runTimeList,[x*2 for x in range(400)])
    print 'meanTime: ' + str(meanTime)
    print 'medianTime: ' + str(medTime)
    return meanTime, medTime
Developer: kschultze, Project: Learning-Python, Lines of code: 11, Source: tileMatching.py


Example 15: DFA

def DFA(data, npoints=None, degree=1, use_median=False):
    """
    computes the detrended fluctuation analysis
    returns the fluctuation F and the corresponding window length L

    :args:
        data (n-by-1 array): the data from which to compute the DFA
        npoints (int): the number of points to evaluate; if omitted the log(n)
            will be used
        degree (int): degree of the polynomial to use for detrending
        use_median (bool): use median instead of mean fluctuation

    :returns:
        F, L: the fluctuation F as function of the window length L

    """
    # max window length: n/4

    #0th: compute integral
    integral = cumsum(data - mean(data))

    #1st: compute different window lengths
    n_samples = npoints if npoints is not None else int(log(len(data)))
    lengths = sort(array(list(set(
            logspace(2,log(len(data)/4.),n_samples,base=exp(1)).astype(int)
             ))))

    #print lengths
    all_flucs = []
    used_lengths = []
    for wlen in lengths:
        # compute the fluctuation of residuals from a linear fit
        # according to Kantz&Schreiber, ddof must be the degree of polynomial,
        # i.e. 1 (or 2, if mean also counts? -> see in book)
        curr_fluc = []
#        rrt = 0
        for startIdx in arange(0,len(integral),wlen):
            pt = integral[startIdx:startIdx+wlen]
            if len(pt) > 3*(degree+1):
                resids = pt - polyval(polyfit(arange(len(pt)),pt,degree),
                                  arange(len(pt)))
#                if abs(wlen - lengths[0]) < -1:
#                    print resids[:20]
#                elif rrt == 0:
#                    print "wlen", wlen, "l0", lengths[0]
#                    rrt += 1
                curr_fluc.append(std(resids, ddof=degree+1))
        if len(curr_fluc) > 0:
            if use_median:
                all_flucs.append(median(curr_fluc))
            else:
                all_flucs.append(mean(curr_fluc))
            used_lengths.append(wlen)
    return array(all_flucs), array(used_lengths)
Developer: MMaus, Project: mutils, Lines of code: 54, Source: statistics.py


Example 16: __init__

    def __init__(self, data, time):
        # data format: multidimensional numpy array
        #              Each inner array is an array of OD values
        #              ordered by time.
        #              This is important for determining the median

        self.dataReps = data  # OD data values (replicates implied)
        self.dataMed = py.median(self.dataReps, axis=0)
        self.time = time  # time values
        self.asymptote = self.__calcAsymptote()
        self.maxGrowthRate, self.mgrTime = self.__calcMGR()
        self.dataLogistic, self.lag = self.__calcLag()
        self.growthLevel = self.__calcGrowth()
Developer: dacuevas, Project: phenotype_microarray, Lines of code: 13, Source: GrowthCurve.py


Example 17: validate_age_group

def validate_age_group(model, replicate):
    # set random seed for reproducibility
    mc.np.random.seed(1234567 + replicate)

    N = 30
    delta_true = 5.0
    pi_true = true_rate_function
    m = simulate_age_group_data(N=N, delta_true=delta_true, pi_true=pi_true)

    if model == "midpoint_covariate":
        fit_midpoint_covariate_model(m)
    if model == "alt_midpoint_covariate":
        fit_alt_midpoint_covariate_model(m)
    elif model == "age_standardizing":
        fit_age_standardizing_model(m)
    elif model == "age_integrating":
        fit_age_integrating_model(m)
    elif model == "midpoint_model":
        fit_midpoint_model(m)
    elif model == "disaggregation_model":
        fit_disaggregation_model(m)
    else:
        raise TypeError, 'Unknown model type: "%s"' % model

    # compare estimate to ground truth
    import data_simulation

    m.mu = pandas.DataFrame(
        dict(
            true=[pi_true(a) for a in range(101)],
            mu_pred=m.vars["mu_age"].stats()["mean"],
            sigma_pred=m.vars["mu_age"].stats()["standard deviation"],
        )
    )
    data_simulation.add_quality_metrics(m.mu)
    print "\nparam prediction bias: %.5f, MARE: %.3f, coverage: %.2f" % (
        m.mu["abs_err"].mean(),
        pl.median(pl.absolute(m.mu["rel_err"].dropna())),
        m.mu["covered?"].mean(),
    )
    print

    data_simulation.add_quality_metrics(m.mu)

    data_simulation.initialize_results(m)
    data_simulation.add_to_results(m, "mu")
    data_simulation.finalize_results(m)

    return m
Developer: aflaxman, Project: gbd, Lines of code: 49, Source: validate_age_group.py


Example 18: plot_output_distribution

def plot_output_distribution(out,title):
    from splikes.utils import paramtext

    out=out.ravel()
    out_full=out

    result=py.hist(out,200)
    paramtext(1.2,0.95,
              'min %f' % min(out_full),
              'max %f' % max(out_full),
              'mean %f' % py.mean(out_full),
              'median %f' % py.median(out_full),
              'std %f' % py.std(out_full),
              )
    py.title(title)
Developer: bblais, Project: Plasticnet, Lines of code: 15, Source: utils.py


Example 19: mare

def mare(pred, obs):
    ''' model median absolute relative error
    Parameters
    ----------
    pred : df
      df of observations from model.vars[data_type]['p_pred'].stats()['mean']
    obs : df
      df of observations from model.vars[data_type]['p_obs'].value
    Results
    -------
    mare : float
      median absolute relative error, as a percent
    '''
    pred = pl.array(pred['mean'])
    obs = pl.array(obs['value']) 
    mare = pl.median((abs(pred - obs)/obs)*100)
    return mare
Developer: peterhm, Project: dismod-mr_rate_validation, Lines of code: 17, Source: model_utilities.py


Example 20: fit

def fit(model):
    emp_priors = model.emp_priors

    ## Then fit the model and compare the estimates to the truth
    model.vars = {}
    model.vars['p'] = data_model.data_model('p', model, 'p', 'all', 'total', 'all', None, emp_priors['p', 'mu'], emp_priors['p', 'sigma'])
    model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=5000, burn=2000, thin=25, tune_interval=100)
    #model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=101, burn=0, thin=1, tune_interval=100)

    #graphics.plot_one_ppc(model.vars['p'], 'p')
    #graphics.plot_convergence_diag(model.vars)
    graphics.plot_one_type(model, model.vars['p'], emp_priors, 'p')
    pl.plot(model.a, model.pi_age_true, 'b--', linewidth=3, alpha=.5, label='Truth')
    pl.legend(fancybox=True, shadow=True, loc='upper left')
    pl.title('Heterogeneity %s'%model.parameters['p']['heterogeneity'])

    pl.show()

    model.input_data['mu_pred'] = model.vars['p']['p_pred'].stats()['mean']
    model.input_data['sigma_pred'] = model.vars['p']['p_pred'].stats()['standard deviation']
    data_simulation.add_quality_metrics(model.input_data)

    model.delta = pandas.DataFrame(dict(true=[model.delta_true]))
    model.delta['mu_pred'] = pl.exp(model.vars['p']['eta'].trace()).mean()
    model.delta['sigma_pred'] = pl.exp(model.vars['p']['eta'].trace()).std()
    data_simulation.add_quality_metrics(model.delta)

    print 'delta'
    print model.delta

    print '\ndata prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (model.input_data['abs_err'].mean(),
                                                     pl.median(pl.absolute(model.input_data['rel_err'].dropna())),
                                                                       model.input_data['covered?'].mean())

    model.mu = pandas.DataFrame(dict(true=model.pi_age_true,
                                     mu_pred=model.vars['p']['mu_age'].stats()['mean'],
                                     sigma_pred=model.vars['p']['mu_age'].stats()['standard deviation']))
    data_simulation.add_quality_metrics(model.mu)

    data_simulation.initialize_results(model)
    data_simulation.add_to_results(model, 'delta')
    data_simulation.add_to_results(model, 'mu')
    data_simulation.add_to_results(model, 'input_data')
    data_simulation.finalize_results(model)

    print model.results
Developer: aflaxman, Project: gbd, Lines of code: 46, Source: validate_similarity.py



Note: The pylab.median function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to its original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.

