
Python numpy.median Function Code Examples


This article collects and summarizes typical usage examples of the numpy.median function in Python. If you have been wondering what exactly the median function does, how to call it, or what real-world uses of median look like, the curated code examples below may help.



A total of 20 code examples of the median function are shown below, sorted by popularity by default.
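
Before going through the collected examples, here is a minimal, self-contained sketch of the basic numpy.median call. The array values below are made up purely for illustration and do not come from any of the projects quoted later:

import numpy as np

data = np.array([1.0, 3.0, 2.0, 100.0, 4.0])

# Median of the whole array; unlike the mean, it is robust to the outlier 100.0
print(np.median(data))          # 3.0

# Median along an axis of a 2-D array
grid = np.array([[1, 2, 3],
                 [4, 5, 6]])
print(np.median(grid, axis=0))  # [2.5 3.5 4.5]
print(np.median(grid, axis=1))  # [2. 5.]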

Example 1: _getTotalDuration

 def _getTotalDuration(self,actStream): #for bed toilet transition  margin = 1 hr 
         totDuration=0;
         count = 0;        
         durlist = []; 
         for i in range(0,len(actStream)-2):
             #print actStream[i]
             firstLine = actStream[i].split(" ");
             secondLine =actStream[i+1].split(" ");
             #get a date from here
             d1= self._get_datetime(firstLine[0],firstLine[1]);
             d2=self._get_datetime(secondLine[0],secondLine[1]);
             td= d2-d1;
             duration =td.total_seconds();
             #print td, duration 
             #durlist.append(duration)
             margin = self._calculateMargin(d1,d2);
             if duration > 60*margin:              
                 #check to see if there were other activities, 
                 count=count+1;
                 continue;
             durlist.append(duration)
             totDuration=duration+totDuration;   
         try:
             #print round(min(durlist)/3600, 2), round(max(durlist)/3600, 2), round(totDuration/3600,2), round(sum(durlist)/3600, 2)
             #return (round(totDuration/60,5), count, round(numpy.min(durlist)/60, 5), round(numpy.max(durlist)/60, 5), round(numpy.median(durlist)/60,5), round(numpy.average(durlist)/60, 5));
             return (round(numpy.median(durlist)/60,5), count);
         except ValueError:
             #return (round(totDuration/60,5),count, 0, 0, 0, 0);
             # durlist is empty, so there is no median to report
             return (0.0, count)
Author: BeiyuLin, Project: caabChange, Lines: 29, Source: Activities.py


Example 2: work

    def work(self):
        self.worked = True
        kwargs = dict(
                weights=self.weights,
                mus=self.mus,
                sigmas=self.sigmas,
                low=self.low,
                high=self.high,
                q=self.q,
                )
        samples = GMM1(rng=self.rng,
                size=(self.n_samples,),
                **kwargs)
        samples = np.sort(samples)
        edges = samples[::self.samples_per_bin]
        #print samples

        pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
        dx = edges[1:] - edges[:-1]
        y = 1 / dx / len(dx)

        if self.show:
            plt.scatter(edges[:-1], y)
            plt.plot(edges[:-1], pdf)
            plt.show()
        err = (pdf - y) ** 2
        print(np.max(err))
        print(np.mean(err))
        print(np.median(err))
        if not self.show:
            assert np.max(err) < .1
            assert np.mean(err) < .01
            assert np.median(err) < .01
Author: AshBT, Project: hyperopt, Lines: 33, Source: test_tpe.py


Example 3: compute_ks_by_contained

def compute_ks_by_contained(contigs_by_lib_name, sinks, sources):
    # compute median of maxmin as well as ks p-value of contained maxmin
    for lib_snk in contigs_by_lib_name:
        # for a fixed lib_snk; do all source libs together
        # contained_ctg: contig names of all source libraries stored by source library names
        contained_ctg=collections.defaultdict(set)
        for snkCtg in contigs_by_lib_name[lib_snk].values():
            for srcCtg in snkCtg.contained_in:
                contained_ctg[srcCtg.lib].add(srcCtg.name)
        for lib_src in contigs_by_lib_name:
            if lib_src in contained_ctg:
                contained=[]
                not_contained=[]
                for ctg in contigs_by_lib_name[lib_src]:
                    if ctg in contained_ctg[lib_src]:
                        contained.append(contigs_by_lib_name[lib_src][ctg].maxmin)
                    else:
                        not_contained.append(contigs_by_lib_name[lib_src][ctg].maxmin)
 #               contained=[contigs_by_lib_name[lib_src][ctg].maxmin for ctg in contigs_by_lib_name[lib_src] if ctg in contained_ctg[lib_src]]
 #               not_contained=[contigs_by_lib_name[lib_src][ctg].maxmin for ctg in contigs_by_lib_name[lib_src] if ctg not in contained_ctg[lib_src]]
                ks_pvalue = stats.ks_2samp(contained, not_contained)[1]
                print(lib_src, lib_snk, ks_pvalue, sum(contained)/len(contained), sum(not_contained)/len(not_contained))
                if ks_pvalue < 0.05 and np.median(contained) > np.median(not_contained):
                    sources[lib_snk] |= {lib_src}
                    sinks[lib_src] |= {lib_snk}
Author: shoudan, Project: sag, Lines: 25, Source: source_sink.py


Example 4: getStripStatistics

    def getStripStatistics(self, yKey='vPhi', nMin=10):

        """For each of the strips, get the strip statistics"""

        if np.size(self.stripsFeH) < 1:
            self.buildStripsFeH()

        # may as well loop through!!

        # View of what we're using for our vertical quantity
        x = self.tSim['FeHObs']
        y = self.tSim[yKey]

        nStrips = np.size(self.stripsFeH) - 1
        self.stripCounts = np.zeros(nStrips, dtype='int')
        self.stripMeans = np.zeros(nStrips)
        self.stripMedns = np.zeros(nStrips)
        self.stripStdds = np.zeros(nStrips)
        self.stripFeHs = np.zeros(nStrips) # central point for sample

        for iStrip in range(nStrips):
            xLo = self.stripsFeH[iStrip]
            xHi = self.stripsFeH[iStrip+1]

            bStrip = (self.bSel) & (x >= xLo) & (x < xHi)

            self.stripCounts[iStrip] = np.sum(bStrip)
            if self.stripCounts[iStrip] < nMin:
                continue
            
            self.stripMeans[iStrip] = np.mean(y[bStrip])
            self.stripMedns[iStrip] = np.median(y[bStrip])
            self.stripStdds[iStrip] = np.std(y[bStrip])
            self.stripFeHs[iStrip] = np.median(x[bStrip])
Author: willclarkson, Project: lsstScratchWIC, Lines: 34, Source: viewSim.py


Example 5: start_requests

 def start_requests(self):
     summary_utc = datetime.utcnow() - timedelta(days=1)
     db_engine = create_engine(self.settings.get('SQLALCHEMY_DATABASE_URI'))
     db_session = sessionmaker(bind=db_engine)()
     db_query = db_session.query(LiveTVSite.id.label('site_id'), LiveTVRoom.id.label('room_id'),
                                 LiveTVRoom.url.label('room_url'),
                                 LiveTVRoomPresent.crawl_date_format.label('summary_date'),
                                 func.array_agg(LiveTVRoomPresent.online).label('online_list'))\
         .join(LiveTVSite, LiveTVRoom, LiveTVRoomPresent)\
         .filter(LiveTVRoomPresent.crawl_date_format == summary_utc.strftime(DAILY_DATE_FORMAT))\
         .group_by(LiveTVSite.id, LiveTVRoom.id, LiveTVRoom.url, LiveTVRoomPresent.crawl_date_format)
     for group_row in db_query:
         meta_info = {
             'site_id': group_row.site_id,
             'room_id': group_row.room_id,
             'summary_date': group_row.summary_date,
             'online': numpy.median(group_row.online_list)
         }
         room = self.session.query(LiveTVRoom).filter_by(id=meta_info['room_id']).one_or_none()
         if room:
             yield DailyItem(site_id=group_row.site_id, room_id=group_row.room_id,
                             summary_date=group_row.summary_date, online=numpy.median(group_row.online_list),
                             followers=room.followers, description=room.description, announcement=room.announcement,
                             fallback=False)
     db_session.close()
Author: taogeT, Project: livetv_mining, Lines: 25, Source: quanmin.py


Example 6: allclose_with_out

def allclose_with_out(x, y, atol=0.0, rtol=1.0e-5):
    # run the np.allclose on x and y
    # if it fails print some stats
    # before returning
    ac = np.allclose(x, y, rtol=rtol, atol=atol)
    if not ac:
        dd = np.abs(x - y)
        neon_logger.display('abs errors: %e [%e, %e] Abs Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), atol))
        amax = np.argmax(dd)

        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))

        dd = np.abs(dd - atol) / np.abs(y)
        neon_logger.display('rel errors: %e [%e, %e] Rel Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), rtol))
        amax = np.argmax(dd)
        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
    return ac
Author: StevenLOL, Project: neon, Lines: 29, Source: utils.py


Example 7: q1

def q1():
    # generate random clusters
    clusters = []
    sizes = range(2, 201)
    for size in sizes:
        clusters.append(gen_random_clusters(size))
    
    # get running times
    random.seed(912)
    
    # run 10 trials, and take the median time for each n to smooth data
    slow_trials = np.zeros((10, 199))
    fast_trials = np.zeros((10, 199))
    for i in range(10):
        slow_trials[i,:] = timer(slow_closest_pair, clusters)
        fast_trials[i,:] = timer(fast_closest_pair, clusters)
       
    # times
    slow_times = np.median(slow_trials, 0)
    fast_times = np.median(fast_trials, 0)
    
    # plot
    plt.figure()
    plt.plot(sizes, slow_times, 'c-', label='slow_closest_pair')
    plt.plot(sizes, fast_times, 'm-', label='fast_closest_pair')
    plt.legend(loc='upper left')
    plt.xlabel('Size of Cluster List')
    plt.ylabel('Median Running Time (s), 10 Trials')
    plt.title('Comparison of Running Times on Desktop Python')
    plt.show()
    
    return None
Author: keithgw, Project: algorthimic_thinking, Lines: 32, Source: app3.py


Example 8: remaining_time

 def remaining_time(self):
     """Return our best estimate of the remaining duration, or None
     if we have no bases for guessing."""
     if self.end_times is None:
         return None # We have not started the first module yet
     else:
         module_index = self.current_module.module_num - 1
         index = self.image_set_index * self.num_modules + module_index
         durations = (self.end_times[1:] - self.end_times[:-1]).reshape(self.num_image_sets, self.num_modules)
         per_module_estimates = np.zeros(self.num_modules)
         per_module_estimates[:module_index] = np.median(durations[:self.image_set_index+1,:module_index], 0)
         current_module_so_far = self.adjusted_time() - self.end_times[1 + index - 1]
         if self.image_set_index > 0:
             per_module_estimates[module_index:] = np.median(durations[:self.image_set_index,module_index:], 0)
             per_module_estimates[module_index] = max(per_module_estimates[module_index], current_module_so_far)
         else:
             # Guess that the modules that haven't finished yet are
             # as slow as the slowest one we've seen so far.
             per_module_estimates[module_index] = current_module_so_far
             per_module_estimates[module_index:] = per_module_estimates[:module_index+1].max()
         if False:
             print "current_module_so_far =", current_module_so_far, "; adjusted_time =", self.adjusted_time(), "; end_times =", self.end_times
             print "durations:"
             print durations
             print "per_module_estimates:"
             print per_module_estimates
         per_module_estimates[:module_index] *= self.num_image_sets - self.image_set_index - 1
         per_module_estimates[module_index:] *= self.num_image_sets - self.image_set_index
         per_module_estimates[module_index] -= current_module_so_far
         return per_module_estimates.sum()
Author: drmono, Project: CellProfiler, Lines: 30, Source: progress.py


Example 9: lonlat2xy

def lonlat2xy(lon,lat,lon_0=None,lat_0=None):
    """ Convert pairs of (Lat,Lon) into (x,y)

        Input:
          lon [deg]
          lat [deg]
          lon_0 [deg] => longitude of the origin of the Cartesian system
          lat_0 [deg] => latitude of the origin of the Cartesian system
        Output:
          x [m]
          y [m]

        The projection deforms away from the origin. Since latitudes do not
          deform, y is estimated first; then, for each point, the distance to
          the reference meridian (lon_0) is estimated using the latitude of
          the measurement.
    """
    if (lat_0==None) or (lon_0==None):
        lat_0=numpy.median(lat)
        lon_0=numpy.median(lon)
    from fluid.common.distance import distance
    y=distance(lat,0,lat_0,0)
    y[lat<lat_0]=-1*y[lat<lat_0]
    x=distance(lat,lon,lat,lon_0)
    x[lon<lon_0]=-1*x[lon<lon_0]
    return x,y
Author: cageo, Project: castelao-2013, Lines: 26, Source: utils.py


Example 10: is_outlier

def is_outlier(points, threshold=3.5):
    """
    Return a boolean array that is True where a point is an outlier and
    False otherwise.

    Data points whose modified z-score is greater than `threshold` are
    classified as outliers.
    """
    # transform into vector
    if len(points.shape) == 1:
        points = points[:,None]

    # compute median value    
    median = np.median(points, axis=0)
    
    # compute diff sums along the axis
    diff = np.sum((points - median)**2, axis=-1)
    diff = np.sqrt(diff)
    # compute MAD
    med_abs_deviation = np.median(diff)
    
    # compute modified Z-score
    # http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.htm#Iglewicz
    modified_z_score = 0.6745 * diff / med_abs_deviation

    # return a mask for each outlier
    return modified_z_score > threshold
Author: EricDoug, Project: python-data-viz-cookbook, Lines: 27, Source: ch02-clean-mad.py
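
A quick usage sketch for the is_outlier helper above; the sample values are invented for illustration and only numpy is required:

import numpy as np

values = np.array([9.8, 10.1, 10.0, 9.9, 10.2, 35.0])
mask = is_outlier(values, threshold=3.5)
print(mask)           # only the 35.0 entry is flagged as an outlier
print(values[~mask])  # keep the non-outliers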


Example 11: _idealize_uncert

def _idealize_uncert(dds):
    for action in dds.actions:
        field = action.diffeo.d
        field_inv = action.diffeo_inv.d
    
        I = np.zeros(field.shape)
        Y, X = np.meshgrid(range(field.shape[1]), range(field.shape[0]))
        I[:, :, 0] = X
        I[:, :, 1] = Y
        
        D = field - I
        v = (np.median(D[:, :, 0]), np.median(D[:, :, 1]))
        
        D_inv = field_inv - I
        v_inv = (np.median(D_inv[:, :, 0]), np.median(D_inv[:, :, 1]))
        
        print('v     = ' + str(v))
        print('v_inv = ' + str(v_inv))
        
        for c in itertools.product(range(X.shape[0]), range(X.shape[1])):
            
            if defined_cell(c, X.shape, v):
                action.diffeo.variance[c] = 1.0
            else:
                action.diffeo.variance[c] = 0.0
                
            if defined_cell(c, X.shape, v_inv):
                action.diffeo_inv.variance[c] = 1.0
            else:
                action.diffeo_inv.variance[c] = 0.0
    return dds
Author: AndreaCensi, Project: surf12adam, Lines: 31, Source: idealize.py


Example 12: denoise

 def denoise(self, data, wavelet):
     noiseSigma = median(absolute(data - median(data))) / 0.6745
     levels = int(floor(log(len(data))))
     WC = pywt.wavedec(data, wavelet, level=levels)
     threshold = noiseSigma * sqrt(2 * log(len(data)))
     NWC = map(lambda x: pywt.thresholding.hard(x, threshold), WC)
     return pywt.waverec(NWC, wavelet)
Author: Fazi99, Project: Lab, Lines: 7, Source: MODIS_EVI_Wavelet.py
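
The example above relies on the old pywt.thresholding module and assumes that names such as median, absolute, floor, log and sqrt have been imported from numpy into the module namespace. A rough equivalent for current PyWavelets releases, where hard thresholding is done with pywt.threshold(..., mode='hard'), might look like the sketch below (an untested sketch; the wavelet name 'db4' is only a placeholder):

import numpy as np
import pywt

def denoise(data, wavelet='db4'):
    # Robust noise estimate from the median absolute deviation
    noise_sigma = np.median(np.absolute(data - np.median(data))) / 0.6745
    levels = int(np.floor(np.log(len(data))))
    coeffs = pywt.wavedec(data, wavelet, level=levels)
    threshold = noise_sigma * np.sqrt(2 * np.log(len(data)))
    # Hard-threshold every coefficient array, then reconstruct the signal
    new_coeffs = [pywt.threshold(c, threshold, mode='hard') for c in coeffs]
    return pywt.waverec(new_coeffs, wavelet)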


Example 13: __init__

    def __init__(self, f, label, color="k", linestyle="-"):
        d = np.load(f)
        self.data = d
        self.mass = d["mass"]
        self.ul_med = []
        self.ul68_lo = []
        self.ul68_hi = []
        self.ul95_lo = []
        self.ul95_hi = []
        self.label = label
        self.color = color
        self.linestyle = linestyle

        for i in range(len(d["mass"])):

            ul = np.sort(d["ul"][:, i])
            ul = ul[ul > 0]

            n = len(ul)

            m = np.median(ul)

            self.ul68_lo.append(ul[int(max(0, n / 2.0 - n * 0.34))])
            self.ul68_hi.append(ul[int(min(n - 1, n / 2.0 + n * 0.34))])
            self.ul95_lo.append(ul[int(max(0, n / 2.0 - n * 0.95 / 2.0))])
            self.ul95_hi.append(ul[int(min(n - 1, n / 2.0 + n * 0.95 / 2.0))])
            self.ul_med.append(np.median(ul))
Author: lcreyes, Project: gammatools, Lines: 27, Source: dmmodel.py


Example 14: make_lick_individual

def make_lick_individual(targetSN, w1, w2):
    """ Make maps for the kinematics. """
    filename = "lick_corr_sn{0}.tsv".format(targetSN)
    binimg = pf.getdata("voronoi_sn{0}_w{1}_{2}.fits".format(targetSN, w1, w2))
    intens = "collapsed_w{0}_{1}.fits".format(w1, w2)
    extent = calc_extent(intens)
    bins = np.loadtxt(filename, usecols=(0,), dtype=str).tolist()
    bins = np.array([x.split("bin")[1] for x in bins]).astype(int)
    data = np.loadtxt(filename, usecols=np.arange(25)+1).T
    labels = [r'Hd$_A$', r'Hd$_F$', r'CN$_1$', r'CN$_2$', r'Ca4227', r'G4300',
             r'Hg$_A$', r'Hg$_F$', r'Fe4383', r'Ca4455', r'Fe4531', r'C4668',
             r'H$_\beta$', r'Fe5015', r'Mg$_1$', r'Mg$_2$', r'Mg$_b$', r'Fe5270',
             r'Fe5335', r'Fe5406', r'Fe5709', r'Fe5782', r'Na$_D$', r'TiO$_1$',
             r'TiO$_2$']
    mag = "[mag]"
    ang = "[\AA]"
    units = [ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, ang,
             ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, mag,
             mag]
    lims = [[None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None]]
    pdf = PdfPages("figs/lick_sn{0}.pdf".format(targetSN))
    fig = plt.figure(1, figsize=(6.25,5))
    plt.subplots_adjust(bottom=0.12, right=0.97, left=0.09, top=0.96)
    plt.minorticks_on()
    ax = plt.subplot(111)
    ax.minorticks_on()
    plot_indices = np.arange(12,22)
    for i, vector in enumerate(data):
        if i not in plot_indices:
            continue
        print "Making plot for {0}...".format(labels[i])
        kmap = np.zeros_like(binimg)
        kmap[:] = np.nan
        for bin,v in zip(bins, vector):
            idx = np.where(binimg == bin)
            kmap[idx] = v
        vmin = lims[i][0] if lims[i][0] else np.median(vector) - 2 * vector.std()
        vmax = lims[i][1] if lims[i][1] else np.median(vector) + 2 * vector.std()
        m = plt.imshow(kmap, cmap="inferno", origin="bottom", vmin=vmin,
                   vmax=vmax, extent=extent, aspect="equal")
        make_contours()
        plt.minorticks_on()
        plt.xlabel("X [kpc]")
        plt.ylabel("Y [kpc]")
        plt.xlim(extent[0], extent[1])
        plt.ylim(extent[2], extent[3])
        cbar = plt.colorbar(m)
        cbar.set_label("{0} {1}".format(labels[i], units[i]))
        pdf.savefig()
        plt.clf()
    pdf.close()
    return
Author: kadubarbosa, Project: hydramuse, Lines: 60, Source: maps.py


Example 15: test_compare_cache_benchmark

    def test_compare_cache_benchmark(self, varying_param, analytics_data, plt):
        stats = pytest.importorskip('scipy.stats')

        d1, d2 = analytics_data
        assert np.all(d1[varying_param] == d2[varying_param]), (
            'Cannot compare different parametrizations')
        axis_label = self.param_to_axis_label[varying_param]

        print("Cache, varying {0}:".format(axis_label))
        for label, key in zip(self.labels, self.keys):
            clean_d1 = [self.reject_outliers(d) for d in d1[key]]
            clean_d2 = [self.reject_outliers(d) for d in d2[key]]
            diff = [np.median(b) - np.median(a)
                    for a, b in zip(clean_d1, clean_d2)]

            p_values = np.array([2. * stats.mannwhitneyu(a, b)[1]
                                 for a, b in zip(clean_d1, clean_d2)])
            overall_p = 1. - np.prod(1. - p_values)
            if overall_p < .05:
                print("  {label}: Significant change (p <= {p:.3f}). See plots"
                      " for details.".format(
                          label=label, p=np.ceil(overall_p * 1000.) / 1000.))
            else:
                print("  {label}: No significant change.".format(label=label))

            plt.plot(d1[varying_param], diff, label=label)

        plt.xlabel("Number of %s" % axis_label)
        plt.ylabel("Difference in build time (s)")
        plt.legend(loc='best')
Author: amshenoy, Project: nengo, Lines: 30, Source: test_cache.py


Example 16: medianVolume

	def medianVolume(self):
		volpath = os.path.join(self.params['rundir'], "volumes/*a.mrc")
		mrcfiles = glob.glob(volpath)
		volumes = []
		for filename in mrcfiles:
			if os.path.isfile(filename):
				vol = mrc.read(filename)
				print(filename, vol.shape)
				volumes.append(vol)
		volarray = numpy.asarray(volumes, dtype=numpy.float32)
		try:
			medarray = numpy.median(volarray, axis=0)
		except:
			medarray = numpy.median(volarray)
		medfile = os.path.join(self.params['rundir'], "volumes/medianVolume.mrc")
		print(medfile, medarray.shape)
		mrc.write(medarray, medfile)

		apix = apStack.getStackPixelSizeFromStackId(self.params['stackid'])
		sessiondata = apStack.getSessionDataFromStackId(self.params['stackid'])

		uploadcmd = ( ("uploadModel.py --projectid=%d --session=%s --file=%s "
				+"--apix=%.3f --sym=%s --name=satmedian-recon%d.mrc --res=30 --description='%s %d'")
			%(self.params['projectid'], sessiondata['name'], medfile, 
				apix, self.params['symmname'], self.params['reconid'],
				"SAT selected median volume for recon", self.params['reconid'], ) )
		apDisplay.printColor(uploadcmd, "purple")
		f = open("upload.sh", "w")
		f.write(uploadcmd+"\n")
		f.close()
Author: leschzinerlab, Project: myami-3.2-freeHand, Lines: 30, Source: satEuler.py


Example 17: createModel

    def createModel(self,b,g,r):
        bMinusr = self.bMinusr
        bMinusg = self.bMinusg
        b0 = b.copy()
        g0 = g.copy()
        r0 = r.copy()
        
        w = r.shape[0]//2 - 5
        rb = r0/b0
        gb = g0/b0
        rnorm = numpy.median(rb[w:-w,w:-w])
        gnorm = numpy.median(gb[w:-w,w:-w])
        r0 /= rnorm
        g0 /= gnorm
        r0 *= 10**(0.4*bMinusr)
        g0 *= 10**(0.4*bMinusg)

        r0 /= 620.
        g0 /= 540.
        b0 /= 460.

        I = (r0+g0+b0)/3.
        self.I = I
        self.rnorm = rnorm
        self.gnorm = gnorm
        return self.colorize(b,g,r)
Author: DES-SL, Project: BlueRings, Lines: 26, Source: colorImage.py


Example 18: _computePositionTraditionalControl

    def _computePositionTraditionalControl(self, caseObservations, controlObservations, methylFractionFlag, identifyFlag, testProcedure=_tTest):
        """Summarize the observed ipds at one template position/strand, using a case-control analysis"""
        # Compute stats on the observed ipds
        caseData = caseObservations['data']['ipd']
        controlData = controlObservations['data']['ipd']

        res = dict()
        res['refId'] = self.refId

        # FASTA header name
        res['refName'] = self.refName

        strand = res['strand'] = 1 - caseObservations['strand']
        tpl = res['tpl'] = caseObservations['tpl']
        res['base'] = self.cognateBaseFunc(tpl, strand)

        res['coverage'] = int(round((caseData.size + controlData.size) / 2.0))  # need a coverage annotation

        res['caseCoverage'] = caseData.size
        res['controlCoverage'] = controlData.size

        res['caseMean'] = caseData.mean().item()
        res['caseMedian'] = np.median(caseData).item()
        res['caseStd'] = np.std(caseData).item()

        res['controlMean'] = controlData.mean().item()
        res['controlMedian'] = np.median(controlData).item()
        res['controlStd'] = np.std(controlData).item()

        trim = (0.001, 0.03)
        ctrlMean = mstats.trimmed_mean(controlData, trim).item()
        if abs(ctrlMean) > 1e-3:
            res['ipdRatio'] = (mstats.trimmed_mean(caseData, trim).item() / ctrlMean)
        else:
            res['ipdRatio'] = 1.0

        testResults = testProcedure(caseData, controlData)
        res['testStatistic'] = testResults['testStatistic']
        res['pvalue'] = testResults['pvalue']

        pvalue = max(sys.float_info.min, res['pvalue'])
        res['score'] = round(-10.0 * math.log10(pvalue))

        # If the methylFractionFlag is set, then estimate fraction using just modelPrediction in the detection case.
        if methylFractionFlag and pvalue < self.options.pvalue and not identifyFlag:
            if res['controlCoverage'] > self.options.methylMinCov and res['caseCoverage'] > self.options.methylMinCov:

                # Instantiate mixture estimation methods:
                mixture = MixtureEstimationMethods(self.ipdModel.gbmModel.post, self.ipdModel.gbmModel.pre, res, self.options.methylMinCov)
                x = mixture.detectionMixModelBootstrap(res['controlMean'], caseData)

                res[FRAC] = x[0]
                res[FRAClow] = x[1]
                res[FRACup] = x[2]
            else:
                res[FRAC] = np.nan
                res[FRACup] = np.nan
                res[FRAClow] = np.nan

        return res
Author: jgurtowski, Project: kineticsTools, Lines: 60, Source: KineticWorker.py


Example 19: columnpull

def columnpull(column, index, bg, stdev):
    """Define a column pull detector artifact.

    Parameters
    ----------
    column : array
      The column from a detector.
    index : int
      The index at which the column pull may have started, e.g., the
      location of a bright star.
    bg : float
      The background level of the image.
    stdev : float
      The background standard deviation.

    Returns
    -------
    pull : ndarray
      The shape of the column pull.

    """

    if (index < 0) or (index >= column.shape[0]):
        return

    m1 = np.median(column[:index]) - bg
    m2 = np.median(column[index:]) - bg

    pull = np.zeros_like(column)
    if (np.abs(m1 - m2) / stdev) > 1.0:
        pull[:index] = m1
        pull[index:] = m2

    return pull
Author: mkelley, Project: mskpy, Lines: 34, Source: process.py
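
Because columnpull depends only on numpy, a small synthetic check is easy to sketch; the column length, background level and pull offset below are invented for illustration:

import numpy as np

rng = np.random.default_rng(0)
bg, stdev = 100.0, 2.0

# Synthetic detector column: Gaussian background noise, with a constant
# offset (the simulated pull) from row 40 onwards
column = rng.normal(bg, stdev, size=128)
column[40:] += 10.0

pull = columnpull(column, index=40, bg=bg, stdev=stdev)
corrected = column - pull
print(np.median(column[40:]), np.median(corrected[40:]))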


Example 20: bench

def bench(workers, sizes, max_partition_fill_rates, byte_sizes, num_runs):
    for worker in workers:
        for size in sizes:
            for max_partition_fill_rate in max_partition_fill_rates:
                for byte_size in byte_sizes:
                    with open(result_dir + "/" + str(worker) + "_" + str(size) + "_" + str(max_partition_fill_rate) + "_" + str(byte_size) + "_S", "w+") as file1:
                        times = []
                        #flushes = []
                        #collisions = []
                        spills = []
                        for _ in range(num_runs):
                            process = subprocess.Popen(['../../build/benchmarks/hashtable_bench_probing_hashtable', '-s', str(size), '-w', str(worker), '-f', str(max_partition_fill_rate), '-t', str(byte_size)], stdout=subprocess.PIPE)
                            process.wait()
                            out = process.communicate()[0]
                            out_s = out.split()
                            times.append(float(out_s[0]))
                            #flushes.append(float(out_s[1]))
                            #collisions.append(float(out_s[2]))
                            spills.append(float(out_s[1]))
                        time = numpy.median(times)
                        #flush = numpy.median(flushes)
                        #collision = numpy.median(collisions)
                        spill = numpy.median(spills)
                        print(str(worker) + "_" + str(size) + "_" + str(max_partition_fill_rate) + "_" + str(byte_size) + ": " + str(time) + " " + str(spill))
                        file1.write(str(time) + " " + str(spill) + "\n")
                    file1.close()
Author: pombredanne, Project: thrill, Lines: 26, Source: bench_probing_hashtable.py



Note: The numpy.median examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many different developers, and the copyright of each snippet remains with its original author. Please consult the corresponding project's License before redistributing or using the code; do not reproduce without permission.

