
Python numpy.searchsorted Function Code Examples


This article collects and organizes typical usage examples of the numpy.searchsorted function in Python. If you are wondering what the searchsorted function does, how to call it, or what real-world uses of it look like, the curated code examples below may help.



A total of 20 searchsorted code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
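
Before diving into the project examples, here is a minimal, self-contained sketch of the basic numpy.searchsorted behaviour (the array and query values are purely illustrative):

    import numpy as np

    # A sorted 1-D array and some query values (illustrative data only).
    a = np.array([1, 3, 5, 7, 9])
    v = np.array([0, 3, 6, 10])

    # searchsorted returns, for each value in v, the index at which it
    # would be inserted into a so that a stays sorted.
    print(np.searchsorted(a, v))                # [0 1 3 5]
    print(np.searchsorted(a, v, side='right'))  # [0 2 3 5]

With side='left' (the default) the insertion point goes before any equal elements; with side='right' it goes after them, which is why the query value 3 maps to index 1 in the first call and index 2 in the second.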

Example 1: map_to_external_reference

    def map_to_external_reference(self, roi, refname='HXB2', in_patient=True):
        '''
        return a map of positions in the patient to a reference genomewide
        Args:
            roi  --  region of interest given as a string or a tuple (start, end)
            refname --  reference to compare to
            in_patient -- specifies whether the (start, end) refers to reference or patient coordinates
        returns:
            a (len(roi), 3) array with reference coordinates in the first column,
                patient coordinates in the second column,
                and roi coordinates in the third column
        '''
        from .filenames import get_coordinate_map_filename
        coo_fn = get_coordinate_map_filename(self.name, 'genomewide', refname=refname)
        genomewide_map = np.loadtxt(coo_fn, dtype=int)

        if roi in self.annotation:
            roi_pos = np.array([x for x in self.annotation[roi]], dtype = int)
            ind = np.in1d(genomewide_map[:,1], roi_pos)
            roi_indices = np.in1d(roi_pos, genomewide_map[:,1]).nonzero()[0]
            return np.vstack((genomewide_map[ind].T, [roi_indices])).T

        elif roi == "genomewide":
            return np.vstack((genomewide_map.T, [genomewide_map[:,1]])).T            

        else:
            try:
                start, stop = map(int, roi)
                start_ind = np.searchsorted(genomewide_map[:,in_patient], start)
                stop_ind = np.searchsorted(genomewide_map[:,in_patient], stop)
                return np.vstack((genomewide_map[start_ind:stop_ind].T,
                                  [genomewide_map[start_ind:stop_ind, in_patient] - start])).T
            except:
                raise ValueError("ROI not understood")
Developer: vpuller | Project: HIVEVO_access | Lines: 34 | Source file: patients.py


Example 2: test_constant_interpolation_basic

    def test_constant_interpolation_basic(self):
        """Interpolation library works for piecewise constant function
        """

        # Define pixel centers along each direction
        x = numpy.array([1.0, 2.0, 4.0])
        y = numpy.array([5.0, 9.0])

        # Define an nx by ny array with corresponding values
        A = numpy.zeros((len(x), len(y)))

        # Define values for each x, y pair as a linear function
        for i in range(len(x)):
            for j in range(len(y)):
                A[i, j] = linear_function(x[i], y[j])

        # Then test that interpolated points are always assigned value of
        # closest neighbour
        xis = numpy.linspace(x[0], x[-1], 10)
        etas = numpy.linspace(y[0], y[-1], 10)
        points = combine_coordinates(xis, etas)

        vals = interpolate2d(x, y, A, points, mode='constant')

        # Find upper neighbours for each interpolation point
        xi = points[:, 0]
        eta = points[:, 1]
        idx = numpy.searchsorted(x, xi, side='left')
        idy = numpy.searchsorted(y, eta, side='left')

        # Get the four neighbours for each interpolation point
        x0 = x[idx - 1]
        x1 = x[idx]
        y0 = y[idy - 1]
        y1 = y[idy]

        z00 = A[idx - 1, idy - 1]
        z01 = A[idx - 1, idy]
        z10 = A[idx, idy - 1]
        z11 = A[idx, idy]

        # Location coefficients
        alpha = (xi - x0) / (x1 - x0)
        beta = (eta - y0) / (y1 - y0)

        refs = numpy.zeros(len(vals))
        for i in range(len(refs)):
            if alpha[i] < 0.5 and beta[i] < 0.5:
                refs[i] = z00[i]

            if alpha[i] >= 0.5 and beta[i] < 0.5:
                refs[i] = z10[i]

            if alpha[i] < 0.5 and beta[i] >= 0.5:
                refs[i] = z01[i]

            if alpha[i] >= 0.5 and beta[i] >= 0.5:
                refs[i] = z11[i]

        assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)
Developer: manombawa | Project: risk_in_a_box | Lines: 60 | Source file: test_interpolate.py


Example 3: __init__

    def __init__(self, A, fraction=0.80):
        assert 0 <= fraction <= 1
        # A = U . diag(d) . Vt, O( m n^2 ), lapack_lite --
        self.U, self.d, self.Vt = np.linalg.svd(A, full_matrices=False)
        # different versions of numpy can return U and Vt such that
        # U * Vt is constant but the signs may be switched. Gross...
        # numpy, you owe me one day buster!
        # force a check here...
        if self.Vt[0, 0] < 0:
            self.Vt *= -1.0
            self.U *= -1.0
        assert np.all(self.d[:-1] >= self.d[1:])  # sorted
        self.eigen = self.d ** 2
        self.sumvariance = np.cumsum(self.eigen)
        try:
            self.sumvariance /= self.sumvariance[-1]
        except:
            print len(A), len(self.sumvariance), len(self.eigen)
            raise

        self.npc = np.searchsorted(self.sumvariance, fraction) + 1
        while self.npc == 1:  # prevents less than 2 pcs being found
            fraction *= 1.1
            self.npc = np.searchsorted(self.sumvariance, fraction) + 1
        self.dinv = np.array([1 / d if d > self.d[0] * 1e-6 else 0 for d in self.d])
Developer: pombredanne | Project: GroopM | Lines: 25 | Source file: PCA.py


Example 4: get_peaks

def get_peaks(sub_gene_df, top_s, max_dist, feature_name):
    """
    For each gene in gene_info get the
    peaks within max_dist in top_s. This 
    is basically reverse engineering to get
    the peak info for each gene that was found 
    to be associated with a peak. 
    The reason for reverse engineering rather than
    storing this information when searching for the genes
    for each peak is that we want to use precisely the same
    function to search the genes for the real data and for the 
    permutations.


    Input:
    gene_info ... data frame with index ('chrom','start')
                and columns 'gene_id' and 'end'
    top_s ... series of peak positions with index (chrom, pos)
                and values peak height
    max_dist ... maximum distance between gene and peak
    """
    gene_info = sub_gene_df

    def get_dist(df, gene_pos):
        """
        calculate distance
        """
        s = pd.Series(df.index.droplevel(0).values - gene_pos.ix[df.index[0][0]], index=df.index.droplevel(0).values)
        return s

    tot_gene_peaks_df = pd.DataFrame()
    if not top_s.index.is_monotonic:
        top_s = top_s.sortlevel([0, 1])
    if not gene_info.index.is_monotonic:
        gene_info = gene_info.sort_index()
    for chrom in gene_info.index.droplevel(1).unique():
        loc_top_s = top_s.ix[chrom]
        start = np.searchsorted(loc_top_s.index.values + max_dist, gene_info.ix[chrom].index.values)
        end = np.searchsorted(loc_top_s.index.values - max_dist, gene_info.ix[chrom]["end"].values)
        x = pd.concat(
            [loc_top_s.iloc[st:ed] for st, ed in zip(start, end)], keys=gene_info.ix[chrom][feature_name].values
        )
        x.name = "peak_height"

        dist_start = x.groupby(lambda i: i[0]).apply(
            lambda df: get_dist(df, gene_info.ix[chrom].reset_index().set_index(feature_name)["start"])
        )
        dist_start.name = "dist_start"
        dist_end = x.groupby(lambda i: i[0]).apply(
            lambda df: get_dist(df, gene_info.ix[chrom].set_index(feature_name)["end"])
        )
        dist_end.name = "dist_end"
        gene_peaks_df = pd.concat([x, dist_start, dist_end], axis=1)
        gene_peaks_df.index = pd.MultiIndex.from_arrays(
            [gene_peaks_df.index.droplevel(1), [chrom] * len(x), gene_peaks_df.index.droplevel(0)]
        )
        tot_gene_peaks_df = pd.concat([tot_gene_peaks_df, gene_peaks_df], axis=0)

    tot_gene_peaks_df.index.names = [feature_name, "chrom", "peak_pos"]
    return tot_gene_peaks_df
Developer: feilchenfeldt | Project: hs_vervet | Lines: 60 | Source file: test_enrichment_dev2.py


Example 5: randomChoice

    def randomChoice(V, n=1):
        """
        Make a random choice from a vector V of values which are unnormalised
        probabilities. Return the corresponding index. For example if v = [1, 2, 4]
        then the probabilities of the indices are respectively [1/7, 2/7, 4/7]. The
        parameter n is the number of random choices to make. If V is a matrix,
        then the rows are taken as probabilities, and a choice is made for each
        row. 
        """
        Parameter.checkClass(V, numpy.ndarray)

        if V.shape[0]==0:
            return -1 

        if V.ndim == 1:
            cumV = numpy.cumsum(V)
            p = numpy.random.rand(n)*cumV[-1]
            return numpy.searchsorted(cumV, p)
        elif V.ndim == 2:
            cumV = numpy.cumsum(V, 1)
            P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T

            inds = numpy.zeros(P.shape, numpy.int)
            for i in range(P.shape[0]):
                inds[i, :] = numpy.searchsorted(cumV[i, :], P[i, :])

            return inds
        else:
            raise ValueError("Invalid number of dimensions")
Developer: omosola | Project: APGL | Lines: 29 | Source file: Util.py
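
Example 5 above uses the classic inverse-CDF trick: take a cumulative sum of the (unnormalised) weights and feed uniform random draws to searchsorted. A minimal standalone sketch of the same idea, with weights chosen only for illustration:

    import numpy as np

    weights = np.array([1.0, 2.0, 4.0])    # unnormalised probabilities
    cum = np.cumsum(weights)               # [1. 3. 7.]
    draws = np.random.rand(5) * cum[-1]    # uniform samples in [0, 7)
    indices = np.searchsorted(cum, draws)  # index 2 is drawn ~4/7 of the time
    print(indices)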


Example 6: __call__

    def __call__(self, x):
        """Evaluate the piecewise polynomial

        Parameters
        ----------
        x : scalar or array-like of length N

        Returns
        -------
        y : scalar or array-like of length R or length N or N by R
        """
        if _isscalar(x):
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            y = self.polynomials[pos](x)
        else:
            x = np.asarray(x)
            m = len(x)
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            if self.vector_valued:
                y = np.zeros((m,self.r))
            else:
                y = np.zeros(m)
            for i in xrange(self.n-1):
                c = pos==i
                y[c] = self.polynomials[i](x[c])
        return y
Developer: stefanv | Project: scipy3 | Lines: 26 | Source file: polyint.py


Example 7: _regrid_indices

 def _regrid_indices(cells, depth, points):
     # Calculate the minimum difference in cell extent.
     extent = np.min(np.diff(cells))
     if extent == 0:
         # Detected a dimension coordinate with an invalid
         # zero length cell extent.
         msg = 'The target grid cube {} ({!r}) coordinate contains ' \
             'a zero length cell extent.'
         axis, name = 'x', tx.name()
         if points is sy_points:
             axis, name = 'y', ty.name()
         raise ValueError(msg.format(axis, name))
     elif extent > 0:
         # The cells of the dimension coordinate are in ascending order.
         indices = np.searchsorted(cells, points, side='right') - 1
     else:
         # The cells of the dimension coordinate are in descending order.
         # np.searchsorted() requires ascending order, so we require to
         # account for this restriction.
         cells = cells[::-1]
         right = np.searchsorted(cells, points, side='right')
         left = np.searchsorted(cells, points, side='left')
         indices = depth - right
         # Only those points that exactly match the left-hand cell bound
         # will differ between 'left' and 'right'. Thus their appropriate
         # target cell location requires to be recalculated to give the
         # correct descending [upper, lower) interval cell, source to target
         # regrid behaviour.
         delta = np.where(left != right)[0]
         if delta.size:
             indices[delta] = depth - left[delta]
     return indices
Developer: TheClimateCorporation | Project: iris | Lines: 32 | Source file: regrid.py


Example 8: _linearslice

def _linearslice(linearbpf, x0, x1):
    """
    Slice the given bpf, returning a new Linear bpf with endpoints
    x0 and x1.
    """
    assert isinstance(linearbpf, core.Linear)
    X, Y = linearbpf.points()
    insert_head = x0 > X[0]
    if insert_head:
        i = np.searchsorted(X, x0)
        X = X[i-1:]
        Y = Y[i-1:]
    insert_tail = x1 < X[-1]
    if insert_tail:
        i = np.searchsorted(X, x1)
        X = X[:i+1]
        Y = Y[:i+1]
    if insert_head or insert_tail:
        # we copy when we know exactly how much to copy
        X = X.copy()
        Y = Y.copy()
    if insert_head:
        X[0] = x0
        Y[0] = linearbpf(x0)
    if insert_tail:
        X[i] = x1
        Y[i] = linearbpf(x1)    
    return core.Linear(X, Y)
Developer: gesellkammer | Project: bpf4 | Lines: 28 | Source file: util.py


Example 9: split

 def split(self, cols_or_rows, columns=1):
     # Similar to take but returns two arrays, the extracted columns plus
     # the resulting array.  Assumes cols_or_rows is sorted
     base = dok_matrix()
     ext = dok_matrix()
     indx = int((columns == 1))
     if indx:
         for key in self.keys():
             num = np.searchsorted(cols_or_rows, key[1])
             if cols_or_rows[num] == key[1]:
                 newkey = (key[0], num)
                 ext[newkey] = self[key]
             else:
                 newkey = (key[0], key[1]-num)
                 base[newkey] = self[key]
     else:
         for key in self.keys():
             num = np.searchsorted(cols_or_rows, key[0])
             if cols_or_rows[num] == key[0]:
                 newkey = (num, key[1])
                 ext[newkey] = self[key]
             else:
                 newkey = (key[0]-num, key[1])
                 base[newkey] = self[key]
     return base, ext
Developer: 87 | Project: scipy | Lines: 25 | Source file: dok.py


Example 10: get_exclude_coords

    def get_exclude_coords(self, ex_starts, ex_ends):
                                                    
        mx=self.starts.shape[0]-1
        n_exclude = len(ex_ends)     
        ex_wnd_starts = np.searchsorted(self.starts, ex_starts)
        ex_wnd_ends   = np.searchsorted(self.ends, ex_ends)
        ex_wnd_starts = np.amax(np.c_[ex_wnd_starts-1,np.zeros(n_exclude)],1).astype(int)
        ex_wnd_ends = np.amin(np.c_[ex_wnd_ends+1,np.ones(n_exclude)*mx],1).astype(int)
        ex_starts = self.starts[ex_wnd_starts] 
        ex_ends = self.ends[ex_wnd_ends] 

        ex_coords = [] 
        
        curr_s = ex_starts[0]
        curr_e = ex_ends[0]

        #print ex_wnd_starts
        #print ex_wnd_ends

        for i in xrange(1, n_exclude):
            if ex_starts[i] < curr_e:
                curr_e = ex_ends[i]
            else:
                ex_coords.append(tuple([curr_s,curr_e]))
                curr_s = ex_starts[i]
                curr_e = ex_ends[i]
        
        ex_coords.append(tuple([curr_s,curr_e]))
        return ex_coords
Developer: EichlerLab | Project: ssf_DTS_caller | Lines: 29 | Source file: ssf_caller.py


Example 11: build3DHistogramArray

def build3DHistogramArray(inputA=None,xBinVector=None,yBinVector=None):
    """
    This builds and bins up the DCPD exposure surface.
    The input is a time parameterized array
    [beamlist,beampitchlist,beamyawlist]
    """
    threeDData=list()
    for i,aCol0 in enumerate(inputA[0]):
        threeDData.append((inputA[0][i],\
                           inputA[1][i],\
                           inputA[2][i]))
    #Sort the input structure by col0
    threeDData.sort()
    zMapLists=empty([len(xBinVector),len(yBinVector)],dtype=object)
    for ii in range(0,len(xBinVector)):
        for jj in range(0,len(yBinVector)):
            zMapLists[ii][jj]=list()
    for xIndex in range(0,len(xBinVector)-1):
        xLow=xBinVector[xIndex]
        xHigh=xBinVector[xIndex+1]
        xData=[a for a,b,c in threeDData]
        # Find all points that fit this X column
        dataSubset=threeDData[searchsorted(xData,xLow,side='left'):\
                              searchsorted(xData,xHigh,side='right')]
        yDataSubset=[(b,c) for a,b,c in dataSubset]
        yDataSubset.sort()
        yData=[b for b,c in yDataSubset]
        for yIndex in range(0,len(yBinVector)-1):
            yLow=yBinVector[yIndex]
            yHigh=yBinVector[yIndex+1]
            dataBinMatch=yDataSubset[searchsorted(yData,yLow,side='left'):\
                                     searchsorted(yData,yHigh,side='right')]
            zDataMatch=[c for b,c in dataBinMatch]
            zMapLists[xIndex][yIndex].extend(zDataMatch)
    return zMapLists
Developer: GeraintPratten | Project: lalsuite | Lines: 35 | Source file: followupPDSurface.py


Example 12: kuiper_two

def kuiper_two(data1, data2):
    """Compute the Kuiper statistic to compare two samples.

    Parameters
    ----------
    data1 : array-like
        The first set of data values.
    data2 : array-like
        The second set of data values.
    
    Returns
    -------
    D : float
        The raw test statistic.
    fpp : float
        The probability of obtaining two samples this different from
        the same distribution.

    Notes
    -----
    Warning: the fpp is quite approximate, especially for small samples.

    """
    data1, data2 = sort(data1), sort(data2)

    if len(data2)<len(data1):
        data1, data2 = data2, data1

    cdfv1 = searchsorted(data2, data1)/float(len(data2)) # this could be more efficient
    cdfv2 = searchsorted(data1, data2)/float(len(data1)) # this could be more efficient
    D = (amax(cdfv1-arange(len(data1))/float(len(data1))) + 
            amax(cdfv2-arange(len(data2))/float(len(data2))))

    Ne = len(data1)*len(data2)/float(len(data1)+len(data2))
    return D, kuiper_FPP(D, Ne)
Developer: drdangersimon | Project: ageDate | Lines: 35 | Source file: kuiper.py


Example 13: get_scx_scz_in_timerange

def get_scx_scz_in_timerange(timerange, file):
    """
    read a downloaded FERMI weekly pointing file and extract scx, scz for a timerange.

    Parameters
    ----------

    timerange : a time range object with `start` and `end` attributes
        The time range for which to extract the pointing information.
    file : str
        A filepath to a Fermi/LAT weekly pointing file (e.g. as obtained by the
        download_weekly_pointing_file function).
    """
    
    hdulist = fits.open(file)
    timesinutc = []
    for tim in hdulist[1].data['START']:
        timesinutc.append(met_to_utc(tim))

    startind = np.searchsorted(timesinutc, timerange.start)
    endind = np.searchsorted(timesinutc, timerange.end)

    scx_radec = []
    scz_radec = []
    for i in range(startind, endind):
        scx_radec.append((Longitude(hdulist[1].data['RA_SCX'][i]*u.deg),
                          Latitude(hdulist[1].data['DEC_SCX'][i]*u.deg)))
        scz_radec.append((Longitude(hdulist[1].data['RA_SCZ'][i]*u.deg),
                          Latitude(hdulist[1].data['DEC_SCZ'][i]*u.deg)))
    return scx_radec, scz_radec, timesinutc[startind:endind]
Developer: abigailStev | Project: sunpy | Lines: 30 | Source file: fermi.py
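
This example (and several later ones, e.g. Examples 14, 15 and 18) relies on the same idiom: two searchsorted calls on a sorted array turn a (start, end) interval into slice indices. A minimal sketch of one common variant, with made-up timestamps:

    import numpy as np

    times = np.array([0.0, 1.5, 2.0, 3.7, 5.1, 8.4])  # sorted timestamps (illustrative)
    start, end = 1.5, 5.1

    i0 = np.searchsorted(times, start, side='left')   # first index with times[i] >= start
    i1 = np.searchsorted(times, end, side='right')    # first index with times[i] > end
    print(times[i0:i1])                               # the slice contains 1.5, 2.0, 3.7, 5.1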


Example 14: skim_imgs

def skim_imgs(Mimg, Mimg_tabs, Msnp_tabs, t_adjust=0, tb0=SKIMSPK_TB,
        te0=SKIMSPK_TE, n_blk=20000, onlyonce=True):
    if onlyonce:
        idx_eachimg = [np.nonzero(Mimg == i_img)[0][0] for i_img
                in np.unique(Mimg)]
        t_eachimg = Mimg_tabs[idx_eachimg]
        i_eachimg = Mimg[idx_eachimg]
    else:
        t_eachimg = Mimg_tabs
        i_eachimg = Mimg

    ibie = []
    ib = 0
    ie = 0
    for t0 in t_eachimg:
        tb = t0 + tb0 - t_adjust
        te = t0 + te0 - t_adjust

        xb = np.searchsorted(Msnp_tabs[ib: ib + n_blk], tb)
        if xb >= n_blk:
            xb = np.searchsorted(Msnp_tabs[ib:], tb)
        ib += xb

        xe = np.searchsorted(Msnp_tabs[ie: ie + n_blk], te)
        if xe >= n_blk:
            xe = np.searchsorted(Msnp_tabs[ie:], te)
        ie += xe
        ibie.append((ib, ie))
    return ibie, i_eachimg
Developer: hahong | Project: array_proj | Lines: 29 | Source file: spksort.py


Example 15: get_indices

 def get_indices(ival):
     """ Retuns the indeces surrounding the given interval"""
     start_ind = np.searchsorted(self.x, ival[0], side='right')
     end_ind = np.searchsorted(self.x, ival[1], side='left')
     assert start_ind > 0 and end_ind < len(self.x), \
         "Invalid averaging interval"
     return start_ind, end_ind
Developer: alx5246 | Project: srnnetTraining | Lines: 7 | Source file: DiscreteFunc.py


Example 16: get_features

def get_features(peak_s, feature_df, feature_name='feature', max_dist=0):
    """
    Take the input series and get the names of nearby features.

    Input:
    peak_s ... pandas series with (chrom, pos) index and value of
                the statistic ('peak height'). Series should be named.
    feature_df ... data frame with feature info.
    """
    all_features = []
    if not feature_df.index.is_monotonic:
        feature_df = feature_df.sort_index()
    tot_hit_df = pd.DataFrame()
    for chrom in peak_s.index.droplevel(1).unique():
        loc_feature_df = feature_df.ix[chrom]
        #loc_feature_df = loc_feature_df.append(pd.DataFrame(np.nan,index=[np.inf],columns=loc_feature_df.columns))
        #print loc_feature_df.index-max_dist, peak_s.ix[chrom].index.values
        #try:
        pos_rel_to_start = np.searchsorted(loc_feature_df.index.values-max_dist,peak_s.ix[chrom].index.values)
        #except:
        #    print chrom, peak_s.ix[chrom]
        pos_rel_to_end = np.searchsorted(loc_feature_df["end"].values+max_dist,peak_s.ix[chrom].index.values)
        features = list(set(loc_feature_df[feature_name].iloc[np.hstack([range(a,b) for a,b in zip(pos_rel_to_end,pos_rel_to_start)])]))
        all_features += features
    return all_features
Developer: feilchenfeldt | Project: enrichme | Lines: 26 | Source file: pandas_util.py


Example 17: get_tmax

    def get_tmax(self, p, cutoff=None):
        if cutoff is None:
            cutoff = self.cutoff

        if self.quad:
            x = np.arange(1, 10000, 1)
            y = np.zeros_like(x)
            func = self.function(x, p)
            func_half = self.function(x[:-1] + 1 / 2, p)
            y[1:] = y[0] + np.cumsum(1 / 6 *
                                     (func[:-1] + 4 * func_half + func[1:]))
            y = y / quad(self.function, 0, np.inf, args=p)[0]
            return np.searchsorted(y, cutoff)

        else:
            t1 = -np.sqrt(3 / 5)
            t2 = 0
            t3 = np.sqrt(3 / 5)
            w1 = 5 / 9
            w2 = 8 / 9
            w3 = 5 / 9

            x = np.arange(1, 10000, 1)
            y = np.zeros_like(x)
            func = self.function(x, p)
            func_half = self.function(x[:-1] + 1 / 2, p)
            y[0] = 0.5 * (w1 * self.function(0.5 * t1 + 0.5, p) +
                          w2 * self.function(0.5 * t2 + 0.5, p) +
                          w3 * self.function(0.5 * t3 + 0.5, p))
            y[1:] = y[0] + np.cumsum(1 / 6 *
                                     (func[:-1] + 4 * func_half + func[1:]))
            y = y / quad(self.function, 0, np.inf, args=p)[0]
            return np.searchsorted(y, cutoff)
Developer: pastas | Project: pasta | Lines: 33 | Source file: rfunc.py


Example 18: _substitute_iers_b

    def _substitute_iers_b(cls, table):
        """Substitute IERS B values with those from a real IERS B table.

        IERS-A has IERS-B values included, but for reasons unknown these
        do not match the latest IERS-B values (see comments in #4436).
        Here, we use the bundled astropy IERS-B table to overwrite the values
        in the downloaded IERS-A table.
        """
        iers_b = IERS_B.open()
        # Substitute IERS-B values for existing B values in IERS-A table
        mjd_b = table['MJD'][~table['UT1_UTC_B'].mask]
        i0 = np.searchsorted(iers_b['MJD'].value, mjd_b[0], side='left')
        i1 = np.searchsorted(iers_b['MJD'].value, mjd_b[-1], side='right')
        iers_b = iers_b[i0:i1]
        n_iers_b = len(iers_b)
        # If there is overlap then replace IERS-A values from available IERS-B
        if n_iers_b > 0:
            # Sanity check that we are overwriting the correct values
            if not np.allclose(table['MJD'][:n_iers_b], iers_b['MJD'].value):
                raise ValueError('unexpected mismatch when copying '
                                 'IERS-B values into IERS-A table.')
            # Finally do the overwrite
            table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC'].value
            table['PM_X_B'][:n_iers_b] = iers_b['PM_x'].value
            table['PM_Y_B'][:n_iers_b] = iers_b['PM_y'].value

        return table
Developer: cactaur | Project: astropy | Lines: 27 | Source file: iers.py


Example 19: derivatives

    def derivatives(self, x, der):
        """Evaluate a derivative of the piecewise polynomial
        Parameters
        ----------
        x : scalar or array-like of length N
        der : integer
            how many derivatives (including the function value as
            0th derivative) to extract

        Returns
        -------
        y : array-like of shape der by R or der by N or der by N by R

        """
        if _isscalar(x):
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            y = self.polynomials[pos].derivatives(x,der=der)
        else:
            x = np.asarray(x)
            m = len(x)
            pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
            if self.vector_valued:
                y = np.zeros((der,m,self.r))
            else:
                y = np.zeros((der,m))
            for i in xrange(self.n-1):
                c = pos==i
                y[:,c] = self.polynomials[i].derivatives(x[c],der=der)
        return y
Developer: stefanv | Project: scipy3 | Lines: 29 | Source file: polyint.py


Example 20: BayesianWords

def BayesianWords(unigram_counts, bigram_counts, n_words):
  unigrams, ucounts = zip(*sorted(filter(
      lambda (k, v): k in bigram_counts,
      unigram_counts.items())))
  prior = np.array(ucounts) / sum(ucounts)
  prior_pdf = np.array([np.sum(prior[:n]) for n in range(len(unigrams))])

  bigram_pdfs = {}
  for w1, w1_bgcnts in bigram_counts.iteritems():
    w2strs, w2counts = zip(*sorted(w1_bgcnts.items()))
    w2pdf = np.array(w2counts) / sum(w2counts)
    bigram_pdfs[w1] = (
        w2strs,
        np.array([np.sum(w2pdf[:n]) for n in range(len(w2strs))]))
    #print '%d bigrams for %s' % (len(w2strs), w1)

  first_word_index = np.searchsorted(prior_pdf, np.random.random_sample())
  words = [unigrams[min(len(unigrams)-1, first_word_index)]]
  for n in range(1, n_words):
    if words[-1] in bigram_pdfs:
      bigram_strs, bigram_pdf = bigram_pdfs[words[-1]]
      idx = np.searchsorted(bigram_pdf, np.random.random_sample())
      words.append(bigram_strs[min(len(bigram_strs)-1, idx)])
    else:
      # Pick from the prior.
      idx = np.searchsorted(prior_pdf, np.random.random_sample())
      words.append(unigrams[min(len(unigrams)-1, idx)])
  return words
Developer: sharkinyourcomputer | Project: agui | Lines: 28 | Source file: parse_data.py



Note: The numpy.searchsorted examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's License before redistributing or using the code. Do not reproduce this article without permission.

