• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python numpy.cumsum函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中numpy.cumsum函数的典型用法代码示例。如果您正苦于以下问题:Python cumsum函数的具体用法?Python cumsum怎么用?Python cumsum使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了cumsum函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: pos_neg_integral

def pos_neg_integral(scores):
    """Split a 1D score array into cumulative positive and negative integrals.

    A zero is prepended so both cumulative sums start at 0.

    Returns a tuple ``(pos_integral, neg_integral)`` where the first is the
    cumulative sum of the non-negative entries and the second of the
    negative entries (zeros elsewhere).
    """
    padded = np.hstack([[0], scores])  # Padding so integrals start at 0.
    positive = padded.copy()
    negative = padded.copy()
    nonneg = padded >= 0
    positive[~nonneg] = 0
    negative[nonneg] = 0
    return np.cumsum(positive), np.cumsum(negative)
开发者ID:danoneata,项目名称:approx_norm_fv,代码行数:7,代码来源:ess.py


示例2: drawPrfastscore

def drawPrfastscore(tp, fp, scr, tot, show=True):
    """Build a precision/recall curve, its 11-point interpolated AP, and the
    score threshold at which precision and recall are closest (EPR point).

    tp, fp are per-detection true/false positive indicators (sorted by
    score), scr the matching scores, tot the number of ground-truth
    positives.  Returns (recall, precision, scr, ap, scr[epr_index]).
    """
    cum_tp = numpy.cumsum(tp)
    cum_fp = numpy.cumsum(fp)
    rec = cum_tp / tot
    prec = cum_tp / (cum_fp + cum_tp)
    # Closest precision/recall pair, scanning from the high-score end.
    gap = numpy.abs(prec[::-1] - rec[::-1])
    pos = len(gap) - gap.argmin() - 1
    # 11-point interpolated average precision (VOC style).
    ap = 0
    for level in numpy.linspace(0, 1, 11):
        candidates = prec[rec >= level]
        if candidates.size == 0:
            candidates = 0
        ap = ap + numpy.max(candidates) / 11
    if show:
        pylab.plot(rec, prec, '-g')
        pylab.title("AP=%.3f EPRthr=%.3f" % (ap, scr[pos]))
        pylab.xlabel("Recall")
        pylab.ylabel("Precision")
        pylab.grid()
        pylab.show()
        pylab.draw()
    return rec, prec, scr, ap, scr[pos]
开发者ID:ChrisYang,项目名称:CRFdet,代码行数:25,代码来源:VOCpr.py


示例3: _additive_estimate

def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):
    """
    Called to compute the Kaplan Meier and Nelson-Aalen estimates.

    Parameters
    ----------
    events:
        pandas DataFrame indexed by event time; columns read here are
        'observed', 'at_risk', and — when ``reverse`` is True —
        'entrance' and 'removed'.
    timeline:
        iterable of times onto which the resulting series are reindexed.
    _additive_f, _additive_var:
        callables ``(at_risk, deaths) -> per-time increment`` for the
        estimate and its variance; the increments are cumulatively summed.
    reverse:
        if True, accumulate from the latest time backwards, rebuilding the
        at-risk counts from entrances minus running removals.

    Returns
    -------
    (estimate_, var_):
        two pandas Series indexed by ``timeline`` (index named 'timeline').
    """
    if reverse:
        events = events.sort_index(ascending=False)
        # At-risk counts rebuilt backwards: total entrances minus the running
        # number removed, shifted so the current time's removals are excluded.
        at_risk = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0)

        deaths = events['observed']

        # Cumulative sum over the reversed index, restored to ascending order;
        # shift(-1)/fillna(0) drops the last point's own contribution.
        estimate_ = np.cumsum(_additive_f(at_risk, deaths)).sort_index().shift(-1).fillna(0)
        var_ = np.cumsum(_additive_var(at_risk, deaths)).sort_index().shift(-1).fillna(0)
    else:
        deaths = events['observed']
        at_risk = events['at_risk']
        estimate_ = np.cumsum(_additive_f(at_risk, deaths))
        var_ = np.cumsum(_additive_var(at_risk, deaths))

    # Align to the requested timeline, forward-filling between event times.
    timeline = sorted(timeline)
    estimate_ = estimate_.reindex(timeline, method='pad').fillna(0)
    var_ = var_.reindex(timeline, method='pad')
    var_.index.name = 'timeline'
    estimate_.index.name = 'timeline'

    return estimate_, var_
开发者ID:benrifkind,项目名称:lifelines,代码行数:26,代码来源:__init__.py


示例4: Bh_Bv_timeseries

def Bh_Bv_timeseries(igramsFile):
    """Reconstruct horizontal (Bh) and vertical (Bv) baseline time series
    from the per-interferogram baselines stored in a PySAR HDF5 file.

    Reads one H/V baseline per interferogram, inverts the design matrix to
    get per-interval rates, and integrates them (with a leading zero) over
    the acquisition time intervals.  Returns (Bh, Bv).
    """
    h5 = h5py.File(igramsFile)
    pair_names = h5['interferograms'].keys()
    bh_per_igram = []
    bv_per_igram = []
    for name in pair_names:
        attrs = h5['interferograms'][name].attrs
        bh_per_igram.append(float(attrs['H_BASELINE_TOP_HDR']))
        bv_per_igram.append(float(attrs['V_BASELINE_TOP_HDR']))

    A, B = design_matrix(h5)
    tbase, dateList, dateDict, dateList1 = date_list(h5)
    dt = np.diff(tbase)

    # Least-squares rates per time interval, integrated from zero.
    zero = np.array([0.], np.float32)
    bh_rate = np.dot(np.linalg.pinv(B), bh_per_igram)
    Bh = np.concatenate((zero, np.cumsum([bh_rate * dt])))
    bv_rate = np.dot(np.linalg.pinv(B), bv_per_igram)
    Bv = np.concatenate((zero, np.cumsum([bv_rate * dt])))

    h5.close()

    return Bh, Bv
开发者ID:mohseniaref,项目名称:PySAR-1,代码行数:25,代码来源:_pysar_utilities.py


示例5: cumulative_moment

    def cumulative_moment(self, year, mag):
        '''Calculation of Mmax using aCumulative Moment approach, adapted from
        the cumulative strain energy method of Makropoulos & Burton (1983)
        :param year: Year of Earthquake
        :type year: numpy.ndarray
        :param mag: Magnitude of Earthquake
        :type mag: numpy.ndarray
        :return mmax: Returns Maximum Magnitude (-inf if the cumulative and
            expected moment-release curves are indistinguishable)
        :rtype mmax: Float
        '''
        # Seismic moment from magnitude (Hanks & Kanamori style relation).
        moment = 10. ** (9.05 + 1.5 * mag)
        years = np.arange(np.min(year), np.max(year) + 1, 1)
        n_years = np.shape(years)[0]
        # Moment released in each calendar year of the catalogue span.
        annual_moment = np.zeros(n_years, dtype=float)
        for slot, current_year in enumerate(years):
            in_this_year = np.abs(year - current_year) < 1E-5
            if np.sum(in_this_year) > 0:
                annual_moment[slot] = np.sum(moment[in_this_year])
        mean_rate = np.sum(annual_moment) / n_years

        # Expected (uniform-rate) versus observed cumulative moment release.
        expected = np.cumsum(mean_rate * np.ones(n_years))
        observed = np.cumsum(annual_moment)
        excursion = observed - expected
        modiff = np.abs(np.max(excursion)) + np.abs(np.min(excursion))
        # Convert the total excursion back to moment magnitude.
        if fabs(modiff) < 1E-20:
            return -np.inf
        return (2. / 3.) * (np.log10(modiff) - 9.05)
开发者ID:GEMScienceTools,项目名称:hmtk,代码行数:34,代码来源:cumulative_moment_release.py


示例6: boxfilter

def boxfilter(I, r):
    """Fast box filter implementation.

    Computes, for every pixel, the sum of I over a (2r+1)x(2r+1) window
    (truncated at the borders) using two passes of cumulative sums, so the
    cost is O(1) per pixel regardless of the radius.

    Parameters
    ----------
    I:  a single channel/gray image data normalized to [0.0, 1.0]
    r:  window radius

    Return
    -----------
    The filtered image data.
    """
    M, N = I.shape
    out = np.zeros((M, N))

    # Pass 1: cumulative sum down the rows, then windowed differences.
    col_acc = np.cumsum(I, axis=0)
    out[:r + 1] = col_acc[r: 2 * r + 1]
    out[r + 1:M - r] = col_acc[2 * r + 1:] - col_acc[:M - 2 * r - 1]
    out[-r:] = np.tile(col_acc[-1], (r, 1)) - col_acc[M - 2 * r - 1:M - r - 1]

    # Pass 2: cumulative sum across the columns, then windowed differences.
    row_acc = np.cumsum(out, axis=1)
    out[:, :r + 1] = row_acc[:, r:2 * r + 1]
    out[:, r + 1:N - r] = row_acc[:, 2 * r + 1:] - row_acc[:, :N - 2 * r - 1]
    out[:, -r:] = (np.tile(row_acc[:, -1][:, None], (1, r))
                   - row_acc[:, N - 2 * r - 1:N - r - 1])

    return out
开发者ID:guanlongzhao,项目名称:dehaze,代码行数:31,代码来源:guidedfilter.py


示例7: _divide

    def _divide(self):        
        """Run one division pass: for every cell flagged as dividing, sample
        a position for the daughter cell in the surrounding block, weighted
        by surface tension and distance.

        NOTE(review): relies on self._cell_block, self._probability,
        self._cumulative and self._indices being preallocated scratch
        buffers of compatible shapes — confirm against the initializer.
        """
        block_size = self.spec.block_size # shortcut
        half_block = (block_size-1)/2
        
        # Coordinates of every cell currently flagged as dividing.
        rows, columns = self.dividing.nonzero()
        for i in range(len(rows)):
            row = rows[i]
            column = columns[i]

            # Copy the neighborhood of (row, column) into the scratch block.
            write_block(self._cell_block, self.cells, row, column, block_size)
            # Surface-tension response of the neighborhood.
            cv2.filter2D(self._cell_block, cv2.CV_32F, self._tension_kernel,
                         self._probability, borderType=cv2.BORDER_CONSTANT)
            # Zero out responses below the tension threshold.
            cv2.threshold(self._probability, self._tension_min, 0, 
                          cv2.THRESH_TOZERO, self._probability)
            # Occupied positions cannot receive the daughter cell.
            self._probability[self._cell_block] = 0
            self._probability **= self.spec.tension_power
            self._probability *= self._distance_kernel
            
            # optimized version of np.random.choice:
            # build the CDF in-place, normalize, then inverse-CDF sample.
            np.cumsum(self._probability.flat, out=self._cumulative)
            total = self._cumulative[-1]
            if total < 1.0e-12:
                # no viable placements, we'll have precision problems anyways
                continue 
            self._cumulative /= total
            
            index = self._indices[np.searchsorted(self._cumulative, 
                                                  rdm.random())]
            local_row, local_column = np.unravel_index(index, 
                                                       self._probability.shape)
            # Convert block-local coordinates back to grid coordinates.
            self.set_alive(row+(local_row-half_block), 
                           column+(local_column-half_block))
开发者ID:chase-ok,项目名称:olin-biofilm,代码行数:32,代码来源:runner.py


示例8: SNfunc

    def SNfunc(self, data, sig, significancefloor=0.5):
        """Compute the peak cumulative signal-to-noise of a map, globally and
        per contiguous significant region.

        Pixels are sorted by descending D/S and accumulated; the maximum of
        cumsum(D)/sqrt(cumsum(S^2)) is the optimal S/N. Pixels below
        ``significancefloor`` (in units of sig) are then zeroed **in place**
        in ``data``, the remaining pixels are grouped into connected regions,
        and the same statistic is computed per region.

        Returns a descending-sorted array: [global SN, region SNs...].
        """
        D = data.ravel()
        S = sig.ravel()

        args = numpy.argsort(-D / S)
        D = numpy.take(D, args)
        S = numpy.take(S, args)
        Dsum = numpy.cumsum(D)
        Ssum = numpy.cumsum(S ** 2) ** 0.5
        SN = (Dsum / Ssum).max()

        # regional SN
        import scipy.ndimage as ndimage
        data[data / sig < significancefloor] = 0
        # FIX: the `scipy.ndimage.measurements` namespace was deprecated and
        # has been removed in modern SciPy; `ndimage.label` is the same
        # function and exists in all SciPy versions.
        masks, multiplicity = ndimage.label(data)
        labels = numpy.arange(1, multiplicity + 1)
        SNs = numpy.zeros(multiplicity + 1)
        SNs[0] = SN
        for i in range(multiplicity):
            D = data[masks == i + 1].ravel()
            S = sig[masks == i + 1].ravel()
            args = numpy.argsort(-D / S)
            D = numpy.take(D, args)
            S = numpy.take(S, args)
            Dsum = numpy.cumsum(D)
            Ssum = numpy.cumsum(S ** 2) ** 0.5
            SNi = (Dsum / Ssum).max()
            SNs[i + 1] = SNi
        SNs = -numpy.sort(-SNs)
        return SNs
开发者ID:bnord,项目名称:LensPop,代码行数:30,代码来源:SignaltoNoise.py


示例9: windower

def windower(thing, max_radius):
    """Precompute a windowed-sum helper for a circular 1D array.

    Pads `thing` circularly by `max_radius` on both ends and takes a double
    cumulative sum, so callers can later extract windowed sums in O(1).

    Returns (original length, double cumulative sum of the padded array,
    max_radius).
    """
    padded = numpy.concatenate(
        (thing[-max_radius:], thing, thing[:max_radius])
    )
    double_acc = numpy.cumsum(numpy.cumsum(padded))
    return (len(thing), double_acc, max_radius)
开发者ID:Puneet-Shivanand,项目名称:nesoni,代码行数:7,代码来源:consequences.py


示例10: _generate_sample

    def _generate_sample(self, X, nn_data, nn_num, row, col, step):
        """Generate a synthetic sample with an additional steps for the
        categorical features.

        Each new sample is generated the same way than in SMOTE. However, the
        categorical features are mapped to the most frequent nearest neighbors
        of the majority class.
        """
        rng = check_random_state(self.random_state)
        # Continuous part: delegate to the plain SMOTE interpolation.
        sample = super(SMOTENC, self)._generate_sample(X, nn_data, nn_num,
                                                       row, col, step)
        # To avoid conversion and since there is only few samples used, we
        # convert those samples to dense array.
        sample = (sample.toarray().squeeze()
                  if sparse.issparse(sample) else sample)
        all_neighbors = nn_data[nn_num[row]]
        all_neighbors = (all_neighbors.toarray()
                         if sparse.issparse(all_neighbors) else all_neighbors)

        # Column layout: continuous features first, then one one-hot block
        # per categorical feature (block widths from the fitted encoder).
        categories_size = ([self.continuous_features_.size] +
                           [cat.size for cat in self.ohe_.categories_])

        # Walk consecutive (start, end) boundaries of each one-hot block and
        # set the category most frequent among the neighbors.
        for start_idx, end_idx in zip(np.cumsum(categories_size)[:-1],
                                      np.cumsum(categories_size)[1:]):
            col_max = all_neighbors[:, start_idx:end_idx].sum(axis=0)
            # tie breaking argmax
            col_sel = rng.choice(np.flatnonzero(
                np.isclose(col_max, col_max.max())))
            sample[start_idx:end_idx] = 0
            sample[start_idx + col_sel] = 1

        return sparse.csr_matrix(sample) if sparse.issparse(X) else sample
开发者ID:scikit-learn-contrib,项目名称:imbalanced-learn,代码行数:32,代码来源:_smote.py


示例11: _major_slice

    def _major_slice(self, idx, copy=False):
        """Index along the major axis where idx is a slice object.

        Returns a new matrix of the same compressed-sparse class containing
        the selected major-axis rows/columns; ``copy`` controls whether the
        index/data arrays are copied in the contiguous (step == 1) case.
        """
        if idx == slice(None):
            # Full slice: nothing to compute.
            return self.copy() if copy else self

        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(M)
        # xrange: Python 2 builtin (or a compat shim imported elsewhere in
        # this module); len() of it is simply the slice's element count.
        M = len(xrange(start, stop, step))
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape)

        # Per-row nonzero counts of the selected rows become the new indptr.
        row_nnz = np.diff(self.indptr)
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz[idx], out=res_indptr[1:])

        if step == 1:
            # Contiguous selection: slice the index/data arrays directly.
            all_idx = slice(self.indptr[start], self.indptr[stop])
            res_indices = np.array(self.indices[all_idx], copy=copy)
            res_data = np.array(self.data[all_idx], copy=copy)
        else:
            # Strided selection: gather row by row via the C helper.
            nnz = res_indptr[-1]
            res_indices = np.empty(nnz, dtype=idx_dtype)
            res_data = np.empty(nnz, dtype=self.dtype)
            csr_row_slice(start, stop, step, self.indptr, self.indices,
                          self.data, res_indices, res_data)

        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
开发者ID:Eric89GXL,项目名称:scipy,代码行数:31,代码来源:compressed.py


示例12: _major_index_fancy

    def _major_index_fancy(self, idx):
        """Index along the major axis where idx is an array of ints.

        Returns a new matrix of the same compressed-sparse class whose
        major-axis entries are the selected rows/columns, in the order given.
        """
        idx_dtype = self.indices.dtype
        indices = np.asarray(idx, dtype=idx_dtype).ravel()

        _, N = self._swap(self.shape)
        M = len(indices)
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape)

        # Per-row nonzero counts of the selected rows become the new indptr.
        # NOTE(review): this indexes row_nnz with the raw `idx`, not the
        # raveled `indices` — equivalent for 1-D input; confirm callers
        # never pass nested sequences.
        row_nnz = np.diff(self.indptr)
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz[idx], out=res_indptr[1:])

        # Gather the selected rows' column indices and values (C helper).
        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        csr_row_index(M, indices, self.indptr, self.indices, self.data,
                      res_indices, res_data)

        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
开发者ID:Eric89GXL,项目名称:scipy,代码行数:25,代码来源:compressed.py


示例13: resize

    def resize(self, *shape):
        """Resize the matrix in-place to the given shape, discarding any
        stored entries that fall outside the new shape and zero-padding
        any newly exposed region (compressed-sparse implementation).
        """
        shape = check_shape(shape)
        if hasattr(self, 'blocksize'):
            # BSR: the new shape must be a whole number of blocks.
            bm, bn = self.blocksize
            new_M, rm = divmod(shape[0], bm)
            new_N, rn = divmod(shape[1], bn)
            if rm or rn:
                raise ValueError("shape must be divisible into %s blocks. "
                                 "Got %s" % (self.blocksize, shape))
            M, N = self.shape[0] // bm, self.shape[1] // bn
        else:
            new_M, new_N = self._swap(shape)
            M, N = self._swap(self.shape)

        if new_M < M:
            # Shrinking the major axis: truncate to the surviving rows.
            self.indices = self.indices[:self.indptr[new_M]]
            self.data = self.data[:self.indptr[new_M]]
            self.indptr = self.indptr[:new_M + 1]
        elif new_M > M:
            # Growing the major axis: new rows are empty, so the extended
            # indptr entries all repeat the last old offset.
            self.indptr = np.resize(self.indptr, new_M + 1)
            self.indptr[M + 1:].fill(self.indptr[M])

        if new_N < N:
            # Shrinking the minor axis: drop entries whose column index no
            # longer fits and rebuild indptr from the surviving counts.
            mask = self.indices < new_N
            if not np.all(mask):
                self.indices = self.indices[mask]
                self.data = self.data[mask]
                major_index, val = self._minor_reduce(np.add, mask)
                self.indptr.fill(0)
                self.indptr[1:][major_index] = val
                np.cumsum(self.indptr, out=self.indptr)

        self._shape = shape
开发者ID:Eric89GXL,项目名称:scipy,代码行数:33,代码来源:compressed.py


示例14: __init__

    def __init__(self, b, bResolution, mu, nuXY, nuErr, massT=172.0, widthT=widthTop, massW=80.4, zPlus=True):
        """Set up and run a semileptonic-top kinematic fit from a b jet, a
        muon 4-vector, and the measured transverse neutrino momentum.

        NOTE(review): b and mu are assumed to be ROOT-style Lorentz vectors
        (x()/y()/z()/e()/Perp2()/Pt()/Phi() accessors) — confirm against the
        callers; nuErr is the 2x2 neutrino transverse-momentum covariance.
        """
        # Cache muon kinematics as self.mu, self.muXY, self.muZ, self.muE,
        # self.muT2, self.muT, self.muPhi.
        for key, val in zip(
            ["", "XY", "Z", "E", "T2", "T", "Phi"],
            [mu, np.array([mu.x(), mu.y()]), mu.z(), mu.e(), mu.Perp2(), mu.Pt(), mu.Phi()],
        ):
            setattr(self, "mu" + key, val)

        # Fit configuration and working objects; [-1, 1][zPlus] maps the
        # zPlus flag to a sign (+1 when True, -1 when False).
        for key, val in zip(
            ["massW2", "massT", "invT", "bound", "sign", "rawB", "nuXY", "fitNu"],
            [massW ** 2, massT, 1.0 / widthT, False, [-1, 1][zPlus], b, nuXY, utils.LorentzV()],
        ):
            setattr(self, key, val)

        self.bXY = np.array([b.x(), b.y()])

        # Whiten the neutrino covariance: eigen-decomposition plus inverse
        # resolutions (b resolution first, then the sqrt of the eigenvalues,
        # floored at 1).
        eig, self.Einv = np.linalg.eig(nuErr)
        self.E = self.Einv.T
        self.inv = 1.0 / np.append([bResolution], np.sqrt(np.maximum(1, eig)))

        self.setFittedNu(nuXY)
        # np.cumsum over the object sequence yields partial sums of the
        # 4-vectors: mu, mu+nu (W candidate), mu+nu+b (top candidate).
        _, self.rawW, self.rawT = np.cumsum([mu, self.fitNu, self.rawB])

        self.residualsBSLT = self.fit()
        self.chi2 = self.residualsBSLT.dot(self.residualsBSLT)
        _, self.fitW, self.fitT = np.cumsum([mu, self.fitNu, self.fitB])
开发者ID:zuranski,项目名称:supy,代码行数:26,代码来源:fitKinematic.py


示例15: tests

    def tests(self, distribution='exp', pdelete=0., independent=True, dither=0., tilewindow=1.0):
        """Exercise the STTC calculation on synthetic spike trains.

        Parameters
        ----------
        distribution : str
            Inter-spike-interval model for the reference train st1:
            'exp'/'exponential', 'poisson', or 'regular'.
        pdelete : float
            Fraction of st2 spikes to delete before comparison.
        independent : bool
            If True, st2 is drawn independently of st1; otherwise st2
            starts as st1 itself.
        dither : float
            Std-dev of Gaussian jitter added to st2 spike times.
        tilewindow : float
            Correlation window passed on to set_spikes.
        """
        assert distribution in ['exp', 'exponential', 'poisson', 'regular']
        samplerate = 0.1 # ms
        spikerate = 0.001 # firing rate
        nspikes = 100 # number of spikes to test
        if distribution in ['exp', 'exponential']:
            st1 = np.random.exponential(1./spikerate, nspikes)
            st1 = np.cumsum(st1)
        elif distribution == 'regular':
            st1 = np.linspace(int(10./samplerate),
                int(9000./samplerate), int(10./samplerate))
        elif distribution == 'poisson':
            st1 = np.random.poisson(1./spikerate, nspikes)
            st1 = np.cumsum(st1)

        if independent:
            st2 = np.random.exponential(1./spikerate, nspikes)
            # BUG FIX: this was np.cumsum(st1), which discarded the freshly
            # drawn intervals and made st2 a copy of st1 — i.e. the
            # "independent" branch never produced an independent train.
            st2 = np.cumsum(st2)
        else:
            st2 = st1
        st2 = np.random.choice(st2,
                    int((1.0-pdelete)*st1.shape[0]), replace=False)
        if dither > 0:
            st2 = st2 + np.random.randn(len(st2))*dither
        self.set_spikes(samplerate, st1, st2, tilewindow=tilewindow)
        sttc = self.calc_sttc()
        print('# of spikes in spike train 1: {0:d}, in spike train 2: {1:d} '.format(st1.shape[0], st2.shape[0]))
        print('STTC value: {0:.3f} '.format(sttc))
        self.plot_sttc(st1, st2)
开发者ID:pbmanis,项目名称:VCNModel,代码行数:31,代码来源:sttc.py


示例16: rvs

    def rvs(self, n=1):
        """Generate random samples from the model.

        Parameters
        ----------
        n : int
            Number of samples to generate.

        Returns
        -------
        obs : array_like, length `n`
            List of samples
        """
        # Cumulative distributions for inverse-CDF sampling.
        startprob_cdf = np.cumsum(self.startprob)
        transmat_cdf = np.cumsum(self.transmat, 1)

        # Initial state: first index whose CDF exceeds a uniform draw.
        rand = np.random.rand()
        currstate = (startprob_cdf > rand).argmax()
        obs = [self._generate_sample_from_state(currstate)]

        # BUG FIX: `xrange` is Python 2 only and raises NameError on
        # Python 3; range() is a drop-in replacement here.
        for _ in range(n - 1):
            rand = np.random.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            obs.append(self._generate_sample_from_state(currstate))

        return np.array(obs)
开发者ID:timwee,项目名称:scikit-learn,代码行数:30,代码来源:hmm.py


示例17: __init__

    def __init__(self, lc, voltage, t_tot, t_anchoring, pretilt=0,
                 totaltwist=0, nlayers=100, data_file=None):
        """Discretize a liquid-crystal cell of total thickness `t_tot` into
        `nlayers` layers — two anchoring layers plus uniform internal ones —
        precompute the (normalized) sample points, then set the voltage.
        """
        self.lc = lc
        self.t_tot = t_tot
        self.t_anchoring = t_anchoring
        self.pretilt = pretilt
        self.totaltwist = totaltwist
        self.nlayers = nlayers
        self.data_file = data_file
        # Uniform thickness for each of the (nlayers - 2) internal layers.
        inner = (self.t_tot - 2. * self.t_anchoring) / \
            (self.nlayers - 2.) * numpy.ones(self.nlayers - 2)
        # Full stack: anchoring layer, internal layers, anchoring layer.
        self.tlc = numpy.r_[self.t_anchoring, inner, self.t_anchoring]
        # Left-hand edges of the internal layers.
        edges = numpy.r_[0, numpy.cumsum(inner)]
        # Normalized sample points: midpoints of the internal layers plus
        # the two boundaries (the anchoring layers).
        midpoints = (edges[1:] + edges[:-1]) / 2. / \
            (self.t_tot - 2 * self.t_anchoring)
        self.normalized_sample_points = numpy.r_[0, midpoints, 1]
        # Absolute sample points: centers of every layer in the stack.
        bounds = numpy.r_[0, numpy.cumsum(self.tlc)]
        self.sample_points = .5 * (bounds[1:] + bounds[:-1])
        # Assigned last, mirroring the original "finally, apply voltage"
        # step — presumably this attribute assignment triggers further work.
        self.voltage = voltage
开发者ID:demisjohn,项目名称:EMpy,代码行数:26,代码来源:utils.py


示例18: traj_ss

def traj_ss(lon1, lat1, lon2, lat2):
    '''
    Trajectory skill score, from Liu and Weisberg, 2011

    Computes the normalized cumulative separation s between a control
    trajectory (lon1, lat1) and a comparison trajectory (lon2, lat2);
    s is returned so the caller can choose the n parameter when forming
    the actual skill score.
    '''
    # Separation between the two drifters at each time step.
    separation = get_dist(lon1, lon2, lat1, lat2) # in time

    # Along-track distance of the control trajectory; the cumulative sum
    # turns per-step distances into distance traveled up to each index.
    traveled = np.cumsum(
        get_dist(lon1[:, :-1], lon1[:, 1:], lat1[:, :-1], lat1[:, 1:]),
        axis=1)

    # Normalized cumulative separation.  The first time step is skipped
    # because zero distance traveled would divide by zero; the second
    # cumulative sum accumulates the traveled distances.
    s = np.cumsum(separation[:, 1:], axis=1) / np.cumsum(traveled, axis=1)

    # Return s instead of a skill score so the n parameter can vary.
    return s
开发者ID:dcherian,项目名称:tracpy,代码行数:25,代码来源:calcs.py


示例19: kittler

def kittler(in_arr):
    """
    The reimplementation of Kittler-Illingworth Thresholding algorithm:
    https://www.mathworks.com/matlabcentral/fileexchange/45685
    Paper: [Kittler and Illingworth 1986] Minimum error thresholding.

    Args:
        in_arr(numpy.ndarray): Input 8-bits array.
    Returns:
        t(float): Calculated threshold (a gray level in [0, 255]).
    """
    h, g = np.histogram(in_arr.ravel(), 256, [0, 256])
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the exact type it aliased.
    h = h.astype(float)
    g = g.astype(float)
    g = g[:-1]  # keep the 256 left bin edges, i.e. gray levels 0..255

    # Cumulative counts and first/second moments of the "foreground"
    # class (gray levels up to each candidate threshold).
    c = np.cumsum(h)
    m = np.cumsum(h * g)
    s = np.cumsum(h * g**2)
    sigma_f = np.sqrt(s/c - (m/c)**2)

    # Complementary (background) statistics.
    cb = c[-1] - c
    mb = m[-1] - m
    sb = s[-1] - s
    sigma_b = np.sqrt(sb/cb - (mb/cb)**2)

    # Minimum-error criterion; thresholds that give an empty class yield
    # non-finite values and are excluded by setting them to +inf.
    p = c / c[-1]
    v = p * np.log(sigma_f) + (1-p)*np.log(sigma_b) - p*np.log(p) - \
        (1-p)*np.log(1-p)
    v[~np.isfinite(v)] = np.inf
    idx = np.argmin(v)
    t = g[idx]

    return t
开发者ID:h4k1m0u,项目名称:scikit-image-clustering-scripts,代码行数:34,代码来源:minimium_error_thresholding.py


示例20: run

def run(pars):
    """Sequential-sampling stopping model with a fixed target per option.

    Each observed sample (0 = option A, 1 = option B) updates a logistic
    "keep sampling" probability for its option, driven by how far the
    option's running count is from the target.  Returns the per-trial
    probabilities of sampling A, sampling B, and stopping (each prefixed
    with a 0.5/0.5 prior before any data).
    """
    verbose = pars.get('verbose', False)
    samples = pars.get('data')['samples']

    target = np.round(pars.get('target', 5))  # target sample size per option
    scale = pars.get('s', 1.)                 # continue scale factor

    count_A = np.cumsum((samples == 0))
    count_B = np.cumsum((samples == 1))

    # Logistic probability of continuing to sample each option.
    p_sample_A = 1. / (1. + np.exp((count_A + 1 - target) * scale))
    p_sample_B = 1. / (1. + np.exp((count_B + 1 - target) * scale))

    # Mix by which option was actually sampled on each trial (note: the B
    # update reads the already-updated A values, as in the original model).
    p_sample_A = p_sample_A * (samples == 0) + (1 - p_sample_B) * p_sample_A * (samples == 1)
    p_sample_B = p_sample_B * (samples == 1) + (1 - p_sample_A) * p_sample_B * (samples == 0)

    # Uniform prior before any observation.
    p_sample_A = np.concatenate(([0.5], p_sample_A))
    p_sample_B = np.concatenate(([0.5], p_sample_B))

    p_stop = 1 - (p_sample_A + p_sample_B)

    return {'p_stop': p_stop,
            'p_sample_A': p_sample_A,
            'p_sample_B': p_sample_B}
开发者ID:dmarkant,项目名称:SamplingDynamics,代码行数:25,代码来源:model_fixedperoption.py



注:本文中的numpy.cumsum函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python numpy.datetime64函数代码示例发布时间:2022-05-27
下一篇:
Python numpy.cumprod函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap