
Python numpy.nanpercentile Function Code Examples


This article collects typical usage examples of the numpy.nanpercentile function in Python. If you have been wondering what nanpercentile does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty nanpercentile code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
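
Before the examples, a minimal self-contained sketch of the function's basic behavior (the array values are illustrative):

import numpy as np

# nanpercentile ignores NaNs; plain percentile would propagate them.
a = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
print(np.nanpercentile(a, 50))       # 3.0, the median of the non-NaN values
print(np.nanpercentile(a, [5, 95]))  # both percentiles in one call
print(np.percentile(a, 50))          # nan, for comparison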

Example 1: print_stats

import numpy as np

def print_stats(array):
    # NaN-aware summary statistics of a surface map, in micrometres.
    p05 = np.nanpercentile(array, 5)
    p95 = np.nanpercentile(array, 95)
    print()
    print("5th percentile of data is:\t\t\t" + str(round(p05, 2)) + "um")
    print("95th percentile of data is:\t\t\t" + str(round(p95, 2)) + "um")
    print("Peak-to-peak amplitude of structure is:\t" + str(round(p95 - p05, 2)) + "um")
    print("Half peak-to-peak amplitude of structure is:\t" + str(round((p95 - p05) / 2, 2)) + "um")
    print()
Author: LivTel | Project: weave-scan | Lines: 7 | Source: functions.py
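
A quick usage sketch, assuming the function above; the height-map array is made up:

data = np.array([[1.2, np.nan, 3.4],
                 [5.6, 7.8, np.nan]])
print_stats(data)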


Example 2: _auto_limits

    def _auto_limits(self):

        if self.component_data is None:
            return

        exclude = (100 - self.percentile) / 2.

        # For subsets in 'data' mode, we want to compute the limits based on
        # the full dataset, not just the subset.
        if isinstance(self.data, Subset):
            data_values = self.data.data[self.component_id]
        else:
            data_values = self.data[self.component_id]

        try:
            lower = np.nanpercentile(data_values, exclude)
            upper = np.nanpercentile(data_values, 100 - exclude)
        except AttributeError:  # Numpy < 1.9
            data_values = data_values[~np.isnan(data_values)]
            lower = np.percentile(data_values, exclude)
            upper = np.percentile(data_values, 100 - exclude)

        if isinstance(self.data, Subset):
            lower = 0

        self.set_limits(lower, upper)
Author: saimn | Project: glue | Lines: 26 | Source: attribute_limits_helper.py


Example 3: shift_mask_data

def shift_mask_data(X, Y, upper_percentile=70, lower_percentile=30, n_fwd_days=1):
    # Shift X so that factors at time t line up with returns at t+n_fwd_days (we want to predict future returns, after all)
    shifted_X = np.roll(X, n_fwd_days+1, axis=0)
    
    # Slice off rolled elements
    X = shifted_X[n_fwd_days+1:]
    Y = Y[n_fwd_days+1:]
    
    n_time, n_stocks, n_factors = X.shape
    
    # Look for biggest up and down movers
    upper = np.nanpercentile(Y, upper_percentile, axis=1)[:, np.newaxis]
    lower = np.nanpercentile(Y, lower_percentile, axis=1)[:, np.newaxis]
  
    upper_mask = (Y >= upper)
    lower_mask = (Y <= lower)
    
    mask = upper_mask | lower_mask # This also drops nans
    mask = mask.flatten()
    
    # Only try to predict whether a stock moved up/down relative to other stocks
    Y_binary = np.zeros(n_time * n_stocks)
    Y_binary[upper_mask.flatten()] = 1
    Y_binary[lower_mask.flatten()] = -1
    
    # Flatten X
    X = X.reshape((n_time * n_stocks, n_factors))

    # Drop stocks that did not move much (i.e. are in the 30th to 70th percentile)
    X = X[mask]
    Y_binary = Y_binary[mask]
    
    return X, Y_binary
Author: vsmolyakov | Project: fin | Lines: 33 | Source: alpha_selection.py
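
A hedged usage sketch with synthetic data; only the array shapes matter here (100 days x 50 stocks x 3 factors, one return per day and stock):

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 50, 3))
Y = rng.standard_normal((100, 50))
X_flat, Y_binary = shift_mask_data(X, Y, n_fwd_days=5)
# Roughly 60% of the remaining rows survive (top and bottom 30% movers).
print(X_flat.shape, Y_binary.shape)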


Example 4: test_result_values

    def test_result_values(self):
        tgt = [np.percentile(d, 28) for d in _rdat]
        res = np.nanpercentile(_ndat, 28, axis=1)
        assert_almost_equal(res, tgt)
        tgt = [np.percentile(d, (28, 98)) for d in _rdat]
        res = np.nanpercentile(_ndat, (28, 98), axis=1)
        assert_almost_equal(res, tgt)
Author: LeiDai | Project: numpy | Lines: 7 | Source: test_nanfunctions.py


Example 5: _auto_limits

    def _auto_limits(self):

        if self.data is None:
            return

        if self.attribute is None:
            return

        if self.subset_mode == 'outline':
            self.set_limits(0, 1)
            return

        exclude = (100 - self.percentile) / 2.

        # For subsets in 'data' mode, we want to compute the limits based on
        # the full dataset, not just the subset.
        if self.subset_mode == 'data':
            data_values = self.data.data[self.attribute]
        else:
            data_values = self.data[self.attribute]

        try:
            lower = np.nanpercentile(data_values, exclude)
            upper = np.nanpercentile(data_values, 100 - exclude)
        except AttributeError:  # Numpy < 1.9
            data_values = data_values[~np.isnan(data_values)]
            lower = np.percentile(data_values, exclude)
            upper = np.percentile(data_values, 100 - exclude)

        if self.subset_mode == 'data':
            self.set_limits(0, upper)
        else:
            self.set_limits(lower, upper)
Author: bmorris3 | Project: glue | Lines: 33 | Source: attribute_limits_helper.py


Example 6: _compute

    def _compute(self, arrays, dates, assets, mask):
        """
        For each row in the input, compute a mask of all values falling between
        the given percentiles.
        """
        # TODO: Review whether there's a better way of handling small numbers
        # of columns.
        data = arrays[0].copy().astype(float64)
        data[~mask] = nan

        # FIXME: np.nanpercentile **should** support computing multiple bounds
        # at once, but there's a bug in the logic for multiple bounds in numpy
        # 1.9.2.  It will be fixed in 1.10.
        # c.f. https://github.com/numpy/numpy/pull/5981
        lower_bounds = nanpercentile(
            data,
            self._min_percentile,
            axis=1,
            keepdims=True,
        )
        upper_bounds = nanpercentile(
            data,
            self._max_percentile,
            axis=1,
            keepdims=True,
        )
        return (lower_bounds <= data) & (data <= upper_bounds)
Author: Giruvegan | Project: zipline | Lines: 27 | Source: filter.py
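
On NumPy >= 1.10 the workaround noted in the FIXME is unnecessary: both bounds can come from a single call, with the percentile axis prepended to the result. A sketch (not zipline's actual code):

import numpy as np

data = np.array([[1.0, 2.0, np.nan, 4.0],
                 [5.0, np.nan, 7.0, 8.0]])
# Result has shape (2, 2, 1): percentiles first, then the kept dims.
lower, upper = np.nanpercentile(data, [25.0, 75.0], axis=1, keepdims=True)
in_band = (lower <= data) & (data <= upper)  # same mask as the two-call version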


Example 7: test_multiple_percentiles

    def test_multiple_percentiles(self):
        perc = [50, 100]
        mat = np.ones((4, 3))
        nan_mat = np.nan * mat
        # For checking consistency in higher dimensional case
        large_mat = np.ones((3, 4, 5))
        large_mat[:, 0:2:4, :] = 0
        large_mat[:, :, 3:] *= 2
        for axis in [None, 0, 1]:
            for keepdim in [False, True]:
                with suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "All-NaN slice encountered")
                    val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
                    nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val.shape, val.shape)

                    val = np.percentile(large_mat, perc, axis=axis,
                                        keepdims=keepdim)
                    nan_val = np.nanpercentile(large_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val, val)

        megamat = np.ones((3, 4, 5, 6))
        assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
Author: ContinuumIO | Project: numpy | Lines: 25 | Source: test_nanfunctions.py


Example 8: qmap_mean_departure

def qmap_mean_departure(x, sample1, sample2, meinequantilen, sample_size,
                        return_mean=False, linear=True):
    from scipy import interpolate
    from support_functions import qstats

    s1d = x[sample1]  # truth (sample1)
    s2d = x[sample2]  # biased (sample2)

    # add 0 and 100
    meinequantilen = np.unique(np.concatenate([[0], meinequantilen, [100]]))

    qb = np.nanpercentile(s1d, meinequantilen)  # truth
    qa = np.nanpercentile(s2d, meinequantilen)  # biased
    mean1 = np.copy(qb)
    mean2 = np.copy(qa)

    # Mean of quantile boxes (not 0 and 100)
    count1, m1 = qstats(s1d, meinequantilen[1:-1], counts=sample_size)
    count2, m2 = qstats(s2d, meinequantilen[1:-1], counts=sample_size)
    # only missing?
    mean1[:-1] = m1
    mean2[:-1] = m2
    # interpolation of bin means
    if linear:
        m1d = np.interp(s2d, qb[1:], mean1[:-1])  # interpolate bin means onto the data
        m2d = np.interp(s2d, qa[1:], mean2[:-1])
    else:
        tck = interpolate.splrep(qb[1:], mean1[:-1], s=0)
        m1d = interpolate.splev(s2d, tck, der=0)
        tck = interpolate.splrep(qa[1:], mean2[:-1], s=0)
        m2d = interpolate.splev(s2d, tck, der=0)
    # difference
    if return_mean:
        return m1, m2

    return m1d - m2d   # one value
Author: MBlaschek | Project: radiosonde | Lines: 35 | Source: departures.py
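
The core idea, reduced to a self-contained sketch that skips qstats and the bin-mean refinement: estimate the quantile-wise departure between the two samples and interpolate it back onto the biased data. All names and values below are illustrative:

import numpy as np

rng = np.random.default_rng(42)
q = np.arange(0, 101, 10)
truth = rng.standard_normal(1000)           # plays the role of sample1
biased = rng.standard_normal(1000) + 0.5    # plays the role of sample2

# Departure at each quantile, interpolated back onto the biased values.
departure = np.interp(biased,
                      np.nanpercentile(biased, q),
                      np.nanpercentile(truth, q) - np.nanpercentile(biased, q))
corrected = biased + departure              # roughly removes the +0.5 offset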


Example 9: test_percentile_nasty_partitions

    def test_percentile_nasty_partitions(self):
        # Test percentile with nasty partitions: divide up 5 assets into
        # quartiles.
        # There isn't a nice mathematical definition of correct behavior here,
        # so for now we guarantee the behavior of numpy.nanpercentile.  This is
        # mostly for regression testing in case we write our own specialized
        # percentile calculation at some point in the future.

        data = arange(25, dtype=float).reshape(5, 5) % 4
        quartiles = range(4)
        filter_names = ['pct_' + str(q) for q in quartiles]

        graph = TermGraph(
            {
                name: self.f.percentile_between(q * 25.0, (q + 1) * 25.0)
                for name, q in zip(filter_names, quartiles)
            }
        )
        results = self.run_graph(
            graph,
            initial_workspace={self.f: data},
            mask=self.build_mask(ones((5, 5))),
        )

        for name, quartile in zip(filter_names, quartiles):
            result = results[name]
            lower = quartile * 25.0
            upper = (quartile + 1) * 25.0
            expected = and_(
                nanpercentile(data, lower, axis=1, keepdims=True) <= data,
                data <= nanpercentile(data, upper, axis=1, keepdims=True),
            )
            check_arrays(result, expected)
Author: Weylew | Project: zipline | Lines: 33 | Source: test_filter.py


Example 10: simpleStats

def simpleStats(y, axis=None):
    """ Computes simple statistics

    Computes the mean, median, min, max, standard deviation, and interquartile
    range of a numpy array y.

    Args:
        y (array): A Numpy array
        axis (int, tuple of ints): Optional. Axis or axes along which the
            statistics are computed; the default is to compute over the
            flattened array. If a tuple of ints, computed over multiple axes.

    Returns:
        The mean, median, min, max, standard deviation and IQR by columns

    """
    # make sure that y is an array
    y = np.array(y, dtype='float64')

    # Perform the various calculations
    mean = np.nanmean(y, axis=axis)
    std = np.nanstd(y, axis=axis)
    median = np.nanmedian(y, axis=axis)
    min_ = np.nanmin(y, axis=axis)
    max_ = np.nanmax(y, axis=axis)
    IQR = np.nanpercentile(y, 75, axis=axis) - np.nanpercentile(y, 25, axis=axis)

    return mean, median, min_, max_, std, IQR
Author: LinkedEarth | Project: Pyleoclim_util | Lines: 28 | Source: Stats.py
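
A usage sketch with a made-up record containing gaps:

import numpy as np

y = [[1.0, 2.0, np.nan],
     [4.0, 5.0, 6.0]]
mean, median, min_, max_, std, IQR = simpleStats(y, axis=0)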


Example 11: update_values

    def update_values(self, use_default_modifiers=False, **properties):

        if not any(prop in properties for prop in ('attribute', 'percentile', 'log')):
            self.set(percentile='Custom')
            return

        if use_default_modifiers:
            percentile = 100
            log = False
        else:
            percentile = self.percentile or 100
            log = self.log or False

        if percentile == 'Custom' or self.data is None:

            self.set(percentile=percentile, log=log)

        else:

            exclude = (100 - percentile) / 2.

            data_values = self.data_values

            try:
                lower = np.nanpercentile(data_values, exclude)
                upper = np.nanpercentile(data_values, 100 - exclude)
            except AttributeError:  # Numpy < 1.9
                data_values = data_values[~np.isnan(data_values)]
                lower = np.percentile(data_values, exclude)
                upper = np.percentile(data_values, 100 - exclude)

            self.set(lower=lower, upper=upper, percentile=percentile, log=log)
Author: astrofrog | Project: glue | Lines: 32 | Source: state_objects.py


Example 12: timeseries

def timeseries(iData, zoneMap, std=None):
    '''
    Zone-wise averaging of the input data
    input: 3D matrix (layers x width x height) and a map of zones (W x H)
    output: 2D matrices (layers x zones) with mean, std and percentiles
    '''
    #reshape input cube into 2D matrix
    r, h, w = iData.shape
    iData, notNanDataI = cube2flat(iData)
    #get unique values of not-nan labels
    uniqZones = np.unique(zoneMap[np.isfinite(zoneMap)])
    zoneNum = np.zeros((r, uniqZones.size))
    zoneMean = np.zeros((r, uniqZones.size))
    zoneStd = np.zeros((r, uniqZones.size))
    zoneP16 = np.zeros((r, uniqZones.size))
    zoneP84 = np.zeros((r, uniqZones.size))

    # in each zone: collect all values from the input data and average the
    # non-NaN entries
    for i in range(uniqZones.size):
        zi = uniqZones[i]
        if not np.isnan(zi):
            zoneData = iData[:, zoneMap.flat == zi]
            zoneNum[:, i] = zi
            if std is not None:
                # filter out values beyond std standard deviations; a first-pass
                # mean/std is needed here, since zoneMean/zoneStd are still zero
                zoneMean[:, i] = np.nanmean(zoneData, axis=1)
                zoneStd[:, i] = np.nanstd(zoneData, axis=1)
                outliers = (np.abs(zoneData.T - zoneMean[:, i]) > zoneStd[:, i] * std).T
                zoneData[outliers] = np.nan

            zoneMean[:, i] = np.nanmean(zoneData, axis=1)
            zoneStd[:, i] = np.nanstd(zoneData, axis=1)
            zoneP16[:, i] = np.nanpercentile(zoneData, 16, axis=1)
            zoneP84[:, i] = np.nanpercentile(zoneData, 84, axis=1)

    return zoneMean, zoneStd, zoneNum, zoneP16, zoneP84
Author: nansencenter | Project: zoning | Lines: 34 | Source: zoning.py


Example 13: _rescale_imshow_rgb

def _rescale_imshow_rgb(darray, vmin, vmax, robust):
    assert robust or vmin is not None or vmax is not None
    # There's a cyclic dependency via DataArray, so we can't import from
    # xarray.ufuncs in global scope.
    from xarray.ufuncs import maximum, minimum
    # Calculate vmin and vmax automatically for `robust=True`
    if robust:
        if vmax is None:
            vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)
        if vmin is None:
            vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)
    # If not robust and one bound is None, calculate the default other bound
    # and check that an interval between them exists.
    elif vmax is None:
        vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1
        if vmax < vmin:
            raise ValueError(
                'vmin=%r is less than the default vmax (%r) - you must supply '
                'a vmax > vmin in this case.' % (vmin, vmax))
    elif vmin is None:
        vmin = 0
        if vmin > vmax:
            raise ValueError(
                'vmax=%r is less than the default vmin (0) - you must supply '
                'a vmin < vmax in this case.' % vmax)
    # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float
    # to avoid precision loss, integer over/underflow, etc with extreme inputs.
    # After scaling, downcast to 32-bit float.  This substantially reduces
    # memory usage after we hand `darray` off to matplotlib.
    darray = ((darray.astype('f8') - vmin) / (vmax - vmin)).astype('f4')
    return minimum(maximum(darray, 0), 1)
Author: jcmgray | Project: xarray | Lines: 31 | Source: plot.py
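
The robust branch in isolation, as a sketch; ROBUST_PERCENTILE is a module-level constant in xarray (assumed here to be 2.0), and the image is random:

import numpy as np

ROBUST_PERCENTILE = 2.0  # assumption: xarray's module-level value
img = np.random.rand(64, 64) * 100.0
vmin = np.nanpercentile(img, ROBUST_PERCENTILE)
vmax = np.nanpercentile(img, 100 - ROBUST_PERCENTILE)
# Scale to [0, 1] in float64, clip, then downcast, as in the function above.
scaled = np.clip((img.astype('f8') - vmin) / (vmax - vmin), 0, 1).astype('f4')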


Example 14: truncate_range

def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
    """Truncate too low and too high values.

    Parameters
    ----------
    data : np.ndarray
        Image to be truncated.
    percMin : float
        Percentile minimum.
    percMax : float
        Percentile maximum.
    discard_zeros : bool
        Discard voxels with value 0 from truncation.

    Returns
    -------
    data : np.ndarray
        Truncated data.
    pMin : float
        Minimum truncation threshold which is used.
    pMax : float
        Maximum truncation threshold which is used.

    """
    if discard_zeros:
        msk = ~np.isclose(data, 0.)
        pMin, pMax = np.nanpercentile(data[msk], [percMin, percMax])
    else:
        pMin, pMax = np.nanpercentile(data, [percMin, percMax])
    temp = data[~np.isnan(data)]
    temp[temp < pMin], temp[temp > pMax] = pMin, pMax  # truncate min and max
    data[~np.isnan(data)] = temp
    if discard_zeros:
        data[~msk] = 0  # put back masked out voxels
    return data, pMin, pMax
Author: ofgulban | Project: segmentator | Lines: 35 | Source: utils.py
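
A hypothetical call that clips the darkest and brightest 0.25% of an image (note the function modifies data in place as well as returning it):

import numpy as np

img = np.random.rand(32, 32) * 1000.0
img, pMin, pMax = truncate_range(img, percMin=0.25, percMax=99.75)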


Example 15: test_multiple_percentiles

    def test_multiple_percentiles(self):
        perc = [50, 100]
        mat = np.ones((4, 3))
        nan_mat = np.nan * mat
        # For checking consistency in higher dimensional case
        large_mat = np.ones((3, 4, 5))
        large_mat[:, 0:2:4, :] = 0
        large_mat[:, :, 3:] *= 2
        for axis in [None, 0, 1]:
            for keepdim in [False, True]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
                    nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val.shape, val.shape)

                    val = np.percentile(large_mat, perc, axis=axis,
                                        keepdims=keepdim)
                    nan_val = np.nanpercentile(large_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val, val)

        megamat = np.ones((3, 4, 5, 6))
        assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
Author: dyao-vu | Project: meta-core | Lines: 25 | Source: test_nanfunctions.py


Example 16: Tukey_outliers

import numpy as np
from scipy.stats import norm, poisson

def Tukey_outliers(set_of_means, FDR=0.005, supporting_interval=0.5, verbose=False):
    """
    Performs a Tukey quantile test for outliers from a normal distribution
    with a defined false discovery rate.
    :param set_of_means: values to test
    :param FDR: false discovery rate
    :return: indices of the high outliers, indices of the low outliers
    """
    # false discovery rate v.s. expected falses v.s. power
    q1_q3 = norm.interval(supporting_interval)
    # TODO: this is not necessary: we can perfectly well fit it with proper params to FDR
    FDR_q1_q3 = norm.interval(1 - FDR)
    multiplier = (FDR_q1_q3[1] - q1_q3[1]) / (q1_q3[1] - q1_q3[0])
    l_means = len(set_of_means)

    q1 = np.nanpercentile(set_of_means, 50 * (1 - supporting_interval))
    q3 = np.nanpercentile(set_of_means, 50 * (1 + supporting_interval))
    high_fence = q3 + multiplier * (q3 - q1)
    low_fence = q1 - multiplier * (q3 - q1)

    if verbose:
        print('FDR:', FDR)
        print('q1_q3', q1_q3)
        print('FDRq1_q3', FDR_q1_q3)
        print('q1, q3', q1, q3)
        print('fences', high_fence, low_fence)
        print("FDR: %s %%, expected outliers: %s, outlier 5%% confidence interval: %s" %
              (FDR * 100, FDR * l_means, poisson.interval(0.95, FDR * l_means)))

    low_outliers = (set_of_means < low_fence).nonzero()[0]
    high_outliers = (set_of_means > high_fence).nonzero()[0]

    return high_outliers, low_outliers
Author: chiffa | Project: chiffatools | Lines: 34 | Source: stats.py
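
For comparison with the FDR-calibrated multiplier above, the classic Tukey rule (multiplier fixed at 1.5) is a nanpercentile two-liner; the data here are made up:

import numpy as np

x = np.array([1.0, 2.0, 2.5, np.nan, 3.0, 50.0])
q1, q3 = np.nanpercentile(x, [25, 75])
iqr = q3 - q1
is_outlier = (x < q1 - 1.5 * iqr) | (x > q3 + 1.5 * iqr)  # flags only 50.0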


Example 17: display

	def display(self, keys=None, live=True, scale=False):
		"""
		plot the training data
		"""
		if keys is None:
			keys = self.headers
		plt.clf()
		fig, axes = plt.subplots(1, len(keys), figsize=(len(keys) * 5,5), squeeze=False)  
		counter = 0
		for fig_j,c in enumerate(self.categories):
			for fig_i, h in enumerate(keys):
				ax1 = axes[0,fig_i]
				ax1.plot(self.ys[c][h].x,self.ys[c][h].y,colors[fig_j])
				if scale:
					m = np.nanpercentile(self.ys[c][h].y , 25, interpolation="higher")
					M = np.nanpercentile(self.ys[c][h].y , 75, interpolation="higher")
					ax1.set_ylim([0 , 1.5 * M])
				val = self.ys[c][h].y[-1]
				#ax1.set_title(h + ": " +  str(val))
				if counter == 0:
					ax1.set_title("{0} : {1:.3f}".format(h,val))
				#ax1.annotate(self.ys[h][-1],xy=(   , np.mean(self.ys[h]) ) )
			counter += 1
		fig.tight_layout()
		if live:
			display.clear_output(wait=True)
			display.display(plt.gcf())
			plt.close()
		else:
			plt.plot()
			plt.show()
Author: mhattingpete | Project: GenerativeAdversarialNetworks | Lines: 31 | Source: viz.py


Example 18: test_result_values

    def test_result_values(self):
        tgt = [np.percentile(d, 28) for d in _rdat]
        res = np.nanpercentile(_ndat, 28, axis=1)
        assert_almost_equal(res, tgt)
        # Transpose the array to fit the output convention of numpy.percentile
        tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
        res = np.nanpercentile(_ndat, (28, 98), axis=1)
        assert_almost_equal(res, tgt)
Author: ContinuumIO | Project: numpy | Lines: 8 | Source: test_nanfunctions.py


Example 19: doCalc

    def doCalc(self):
        self.median = float(np.nanmedian(self.list_values))
        self.average = float(np.nanmean(self.list_values))
        self.mode = float(stats.mode(self.list_values, nan_policy='omit')[0])
        #self.average = self.sum / self.len

        self.CI['min'] = float(np.nanpercentile(self.list_values, 5))
        self.CI['max'] = float(np.nanpercentile(self.list_values, 95))
Author: stevenvanrossem | Project: son-cli | Lines: 8 | Source: prometheus_lib.py
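
The interval computation in isolation; list_values is hypothetical:

import numpy as np

list_values = [1.0, 2.0, np.nan, 3.0, 4.0]
ci_min, ci_max = np.nanpercentile(list_values, [5, 95])  # 90% central interval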


Example 20: qmap_departure

def qmap_departure(x, sample1, sample2, meinequantilen, sample_size, sample3=None, return_mean=False, linear=True,
                   verbose=0):
    from scipy import interpolate
    from support_functions import qstats
    #
    s1d = x[sample1]  # truth (sample1)
    s2d = x[sample2]  # biased (sample2)
    #
    # add 0 and 100
    meinequantilen = np.unique(np.concatenate([[0], meinequantilen, [100]]))
    # Be sure to remove 0,100 now
    # Mean of quantile boxes( not 0 and 100 )
    count1, m1 = qstats(s1d, meinequantilen[1:-1], counts=sample_size)
    count2, m2 = qstats(s2d, meinequantilen[1:-1], counts=sample_size)
    ok1 = count1[:-1] > sample_size
    ok2 = count2[:-1] > sample_size
    # Enough data to calculate ?
    if not np.any(ok1 & ok2):
        if sample3 is not None:
            return np.zeros(x[sample3].shape)  # return only zeros
        else:
            return np.zeros(s2d.shape)
    #
    if verbose > 1:
        print("Quantiles:", meinequantilen)
        print("Sample 1: ", count1)
        print("Sample 2: ", count2)
    # 
    qb = np.nanpercentile(s1d, meinequantilen)  # truth
    qa = np.nanpercentile(s2d, meinequantilen)  # biased
    #
    diffs = qb - qa  # Difference of quantiles (1st and lst for interp)
    xp = qa
    xp[:-1] = m2  # x points of the interpolation (? NaN)
    diffs[:-1] = m1 - m2  # y points of the interpolation
    if return_mean:
        return m1, m2
    # interpolate quantile differences
    # how to handle end-point ?
    # if not extrapolate:
    #     diffs = diffs[1:-1] # trim
    #     xp = xp[1:-1]       # trim
    # Spline or linear interpolation
    if not linear:
        tck = interpolate.splrep(xp, diffs, s=0)
        if sample3 is not None:
            out = interpolate.splev(x[sample3], tck, der=0)  # does this retain nan ?
        else:
            out = interpolate.splev(s2d, tck, der=0)
    #
    else:
        # to all data in sample / but not when missing!
        if sample3 is not None:
            out = np.interp(x[sample3], xp, diffs)
        else:
            out = np.interp(s2d, xp, diffs)

    # turn missing into zero
    return np.where(np.isfinite(out), out, 0.)  # size of sample 2 or sample 3 # no adjustment
Author: MBlaschek | Project: radiosonde | Lines: 58 | Source: departures.py



Note: The numpy.nanpercentile examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not repost without permission.

