Python python.range function code examples


This article collects typical usage examples of the range function from statsmodels.compat.python. If you have been wondering what the Python range function does in practice, how to call it, or what real uses of range look like, the curated code examples below should help.



A total of 20 code examples of the range function are shown below, sorted by popularity by default. Each snippet is excerpted from the statsmodels source tree and therefore assumes the imports of its original module (for example, import numpy as np).
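
Before turning to the examples, a quick note on the function itself: in the statsmodels versions these snippets come from, statsmodels.compat.python re-exported a Python-3-style range (alongside helpers such as lmap, lzip and string_types, which also appear below) so that the same code ran on both Python 2 and Python 3. A minimal sketch of the import, assuming a statsmodels version that still ships this compat alias:

from statsmodels.compat.python import range  # behaves like the built-in Python 3 range

squares = [i * i for i in range(5)]
print(squares)  # [0, 1, 4, 9, 16]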

Example 1: generate_ordinal

def generate_ordinal():

    ## Regression coefficients
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 1
    beta[4] = -1

    rz = 0.5

    OUT = open("gee_ordinal_1.csv", "w")

    for i in range(200):

        n = np.random.randint(3, 6) # Cluster size

        x = np.random.normal(size=(n,5))
        for j in range(5):
            x[:,j] += np.random.normal()
        pr = np.dot(x, beta)
        pr = np.array([1,0,-0.5]) + pr[:,None]
        pr = 1 / (1 + np.exp(-pr))

        z = rz*np.random.normal() +\
            np.sqrt(1-rz**2)*np.random.normal(size=n)
        u = norm.cdf(z)

        y = (u[:,None] > pr).sum(1)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j,:]]) + "\n")

    OUT.close()
Developer: 0ceangypsy, Project: statsmodels, Lines: 33, Source: gee_generate_tests.py


Example 2: levinson_durbin_nitime

def levinson_durbin_nitime(s, order=10, isacov=False):
    '''Levinson-Durbin recursion for autoregressive processes

    '''
    #from nitime

##    if sxx is not None and type(sxx) == np.ndarray:
##        sxx_m = sxx[:order+1]
##    else:
##        sxx_m = ut.autocov(s)[:order+1]
    if isacov:
        sxx_m = s
    else:
        sxx_m = acovf(s)[:order+1]  #not tested

    phi = np.zeros((order+1, order+1), 'd')
    sig = np.zeros(order+1)
    # initial points for the recursion
    phi[1,1] = sxx_m[1]/sxx_m[0]
    sig[1] = sxx_m[0] - phi[1,1]*sxx_m[1]
    for k in range(2,order+1):
        phi[k,k] = (sxx_m[k]-np.dot(phi[1:k,k-1], sxx_m[1:k][::-1]))/sig[k-1]
        for j in range(1,k):
            phi[j,k] = phi[j,k-1] - phi[k,k]*phi[k-j,k-1]
        sig[k] = sig[k-1]*(1 - phi[k,k]**2)

    pacf = np.diag(phi).copy()  # partial autocorrelations are the diagonal of phi
    pacf[0] = 1.                # (pacf was left undefined in the source snippet)
    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    return sigma_v, arcoefs, pacf, phi  # return everything
Developer: 0ceangypsy, Project: statsmodels, Lines: 28, Source: try_ld_nitime.py
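
As a quick sanity check of the recursion above, the sketch below feeds it the exact autocovariances of an AR(1) process with coefficient 0.7, for which gamma_k = 0.7**k / (1 - 0.7**2). It needs only numpy plus the function as defined above (isacov=True skips the acovf call); the expected values are what the recursion should recover, not captured output:

import numpy as np

order = 3
gamma = np.array([0.7 ** k for k in range(order + 1)]) / (1 - 0.7 ** 2)
sigma_v, arcoefs, pacf, phi = levinson_durbin_nitime(gamma, order=order, isacov=True)
print(np.round(arcoefs, 6))  # ~ [0.7, 0.0, 0.0]: the AR(1) coefficient recovered within an AR(3) fit
print(round(sigma_v, 6))     # ~ 1.0: the implied innovation variance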


Example 3: dataset

    def dataset(self, as_dict=False):
        """
        Returns a Python generator object for iterating over the dataset.


        Parameters
        ----------
        as_dict : bool, optional
            If as_dict is True, yield each row of observations as a dict.
            If False, yields each row of observations as a list.

        Returns
        -------
        Generator object for iterating over the dataset.  Yields each row of
        observations as a list by default.

        Notes
        -----
        If missing_values is True during instantiation of StataReader then
        observations with _StataMissingValue(s) are not filtered and should
        be handled by your application.
        """

        try:
            self._file.seek(self._data_location)
        except Exception:
            pass

        if as_dict:
            vars = lmap(str, self.variables())
            for i in range(len(self)):
                yield dict(zip(vars, self._next()))
        else:
            for i in range(self._header['nobs']):
                yield self._next()
Developer: statsmodels, Project: statsmodels, Lines: 35, Source: foreign.py


Example 4: prob_quantize_cdf_old

def prob_quantize_cdf_old(binsx, binsy, cdf):
    '''quantize a continuous distribution given by a cdf

    old version without precomputing cdf values

    Parameters
    ----------
    binsx : array_like, 1d
        binedges

    '''
    binsx = np.asarray(binsx)
    binsy = np.asarray(binsy)
    nx = len(binsx) - 1
    ny = len(binsy) - 1
    probs = np.nan * np.ones((nx, ny)) #np.empty(nx,ny)
    for xind in range(1, nx+1):
        for yind in range(1, ny+1):
            upper = (binsx[xind], binsy[yind])
            lower = (binsx[xind-1], binsy[yind-1])
            #print upper,lower,
            probs[xind-1,yind-1] = prob_bv_rectangle(lower, upper, cdf)

    assert not np.isnan(probs).any()
    return probs
Developer: ChadFulton, Project: statsmodels, Lines: 25, Source: quantize.py


Example 5: prob_quantize_cdf

def prob_quantize_cdf(binsx, binsy, cdf):
    '''quantize a continuous distribution given by a cdf

    Parameters
    ----------
    binsx : array_like, 1d
        binedges

    '''
    binsx = np.asarray(binsx)
    binsy = np.asarray(binsy)
    nx = len(binsx) - 1
    ny = len(binsy) - 1
    probs = np.nan * np.ones((nx, ny)) #np.empty(nx,ny)
    cdf_values = cdf(binsx[:,None], binsy)
    cdf_func = lambda x, y: cdf_values[x,y]
    for xind in range(1, nx+1):
        for yind in range(1, ny+1):
            upper = (xind, yind)
            lower = (xind-1, yind-1)
            #print upper,lower,
            probs[xind-1,yind-1] = prob_bv_rectangle(lower, upper, cdf_func)

    assert not np.isnan(probs).any()
    return probs
Developer: ChadFulton, Project: statsmodels, Lines: 25, Source: quantize.py


Example 6: prob_mv_grid

def prob_mv_grid(bins, cdf, axis=-1):
    '''helper function for probability of a rectangle grid in a multivariate distribution

    how does this generalize to more than 2 variates ?

    bins : tuple
        tuple of bin edges, currently it is assumed that they broadcast
        correctly

    '''
    if not isinstance(bins, np.ndarray):
        bins = lmap(np.asarray, bins)
        n_dim = len(bins)
        bins_ = []
        #broadcast if binedges are 1d
        if all(lmap(np.ndim, bins) == np.ones(n_dim)):
            for d in range(n_dim):
                sl = [None]*n_dim
                sl[d] = slice(None)
                bins_.append(bins[d][sl])
    else: #assume it is already correctly broadcasted
        n_dim = bins.shape[0]
        bins_ = bins

    print(len(bins))
    cdf_values = cdf(bins_)
    probs = cdf_values.copy()
    for d in range(n_dim):
        probs = np.diff(probs, axis=d)

    return probs
Developer: ChadFulton, Project: statsmodels, Lines: 31, Source: quantize.py


Example 7: _prepare_structured_array

    def _prepare_structured_array(self, data):
        self.nobs = len(data)
        self.nvar = len(data.dtype)
        self.data = data
        self.datarows = iter(data)
        dtype = data.dtype
        descr = dtype.descr
        if dtype.names is None:
            varlist = _default_names(self.nvar)
        else:
            varlist = dtype.names

        # check for datetime and change the type
        convert_dates = self._convert_dates
        if convert_dates is not None:
            convert_dates = _maybe_convert_to_int_keys(convert_dates,
                                                      varlist)
            self._convert_dates = convert_dates
            for key in convert_dates:
                descr[key] = (
                        descr[key][0],
                        _convert_datetime_to_stata_type(convert_dates[key])
                                )
            dtype = np.dtype(descr)

        self.varlist = varlist
        self.typlist = [_dtype_to_stata_type(dtype[i])
                        for i in range(self.nvar)]
        self.fmtlist = [_dtype_to_default_stata_fmt(dtype[i])
                        for i in range(self.nvar)]
        # set the given format for the datetime cols
        if convert_dates is not None:
            for key in convert_dates:
                self.fmtlist[key] = convert_dates[key]
Developer: statsmodels, Project: statsmodels, Lines: 34, Source: foreign.py


Example 8: test_generate_sample

    def test_generate_sample(self):
        process = ArmaProcess.from_coeffs([0.9])
        np.random.seed(12345)
        sample = process.generate_sample()
        np.random.seed(12345)
        expected = np.random.randn(100)
        for i in range(1, 100):
            expected[i] = 0.9 * expected[i - 1] + expected[i]
        assert_almost_equal(sample, expected)

        process = ArmaProcess.from_coeffs([1.6, -0.9])
        np.random.seed(12345)
        sample = process.generate_sample()
        np.random.seed(12345)
        expected = np.random.randn(100)
        expected[1] = 1.6 * expected[0] + expected[1]
        for i in range(2, 100):
            expected[i] = 1.6 * expected[i - 1] - 0.9 * expected[i - 2] + expected[i]
        assert_almost_equal(sample, expected)

        process = ArmaProcess.from_coeffs([1.6, -0.9])
        np.random.seed(12345)
        sample = process.generate_sample(burnin=100)
        np.random.seed(12345)
        expected = np.random.randn(200)
        expected[1] = 1.6 * expected[0] + expected[1]
        for i in range(2, 200):
            expected[i] = 1.6 * expected[i - 1] - 0.9 * expected[i - 2] + expected[i]
        assert_almost_equal(sample, expected[100:])


        np.random.seed(12345)
        sample = process.generate_sample(nsample=(100,5))
        assert_equal(sample.shape, (100,5))
Developer: cong1989, Project: statsmodels, Lines: 34, Source: test_arima_process.py
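
The test above reproduces the AR recursion by hand in order to validate generate_sample; for ordinary use the same API is just two calls. A condensed sketch, assuming ArmaProcess is importable from statsmodels.tsa.arima_process as in current statsmodels:

import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess

np.random.seed(12345)
process = ArmaProcess.from_coeffs([1.6, -0.9])            # AR(2) coefficients, no MA part
path = process.generate_sample(nsample=500, burnin=100)   # discard 100 warm-up observations
print(path.shape)  # (500,)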


Example 9: generate_poisson

def generate_poisson():

    ## Regression coefficients
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 0.5
    beta[4] = -0.5

    nclust = 100

    rz = 0.5

    OUT = open("gee_poisson_1.csv", "w")

    for i in range(nclust):

        n = np.random.randint(3, 6) # Cluster size

        x = np.random.normal(size=(n,5))
        for j in range(5):
            x[:,j] += np.random.normal()
        lp = np.dot(x, beta)
        E = np.exp(lp)
        y = [np.random.poisson(e) for e in E]
        y = np.array(y)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j,:]]) + "\n")

    OUT.close()
Developer: 0ceangypsy, Project: statsmodels, Lines: 30, Source: gee_generate_tests.py


Example 10: test_ftest_pvalues

    def test_ftest_pvalues(self):
        res = self.results
        use_t = res.use_t
        k_vars = len(res.params)
        # check default use_t
        pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
                                                   for k in range(k_vars)]
        assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)

        # automatic use_f based on the results class use_t
        pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
                                                   for k in range(k_vars)]
        assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)

        # label for pvalues in summary
        string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
        summ = str(res.summary())
        assert_(string_use_t in summ)

        # try except for models that don't have summary2
        try:
            summ2 = str(res.summary2())
        except AttributeError:
            summ2 = None
        if summ2 is not None:
            assert_(string_use_t in summ2)
Developer: haribharadwaj, Project: statsmodels, Lines: 26, Source: test_generic_methods.py


Example 11: _eigval_decomp_SZ

    def _eigval_decomp_SZ(self, irf_resim):
        """
        Returns
        -------
        W: array of eigenvectors
        eigva: list of eigenvalues
        k: matrix indicating column # of largest eigenvalue for each c_i,j

        """
        neqs = self.neqs
        periods = self.periods

        cov_hold = np.zeros((neqs, neqs, periods, periods))
        for i in range(neqs):
            for j in range(neqs):
                cov_hold[i,j,:,:] = np.cov(irf_resim[:,1:,i,j],rowvar=0)

        W = np.zeros((neqs, neqs, periods, periods))
        eigva = np.zeros((neqs, neqs, periods, 1))
        k = np.zeros((neqs, neqs))

        for i in range(neqs):
            for j in range(neqs):
                W[i,j,:,:], eigva[i,j,:,0], k[i,j] = util.eigval_decomp(cov_hold[i,j,:,:])
        return W, eigva, k
Developer: bashtage, Project: statsmodels, Lines: 25, Source: irf.py


Example 12: __iter__

    def __iter__(self):
        n = self.n
        k = self.k
        start = self.start
        if self.return_slice:
            for i in range(start, n-k):
                train_slice = slice(None, i, None)
                if self.kall:
                    test_slice = slice(i, i+k)
                else:
                    test_slice = slice(i+k-1, i+k)
                yield train_slice, test_slice

        else: #for compatibility with other iterators
            for i in range(start, n-k):
                train_index = np.zeros(n, dtype=bool)  # np.bool alias removed in modern NumPy
                train_index[:i] = True
                test_index = np.zeros(n, dtype=bool)
                if self.kall:
                    test_index[i:i+k] = True # np.logical_not(test_index)
                else:
                    test_index[i+k-1:i+k] = True
                #or faster to return np.arange(i,i+k) ?
                #returning slice should be faster in this case
                yield train_index, test_index
Developer: ChadFulton, Project: statsmodels, Lines: 25, Source: cross_val.py


Example 13: approx_hess2

def approx_hess2(x, f, epsilon=None, args=(), kwargs={}, return_grad=False):
    n = len(x)
    # NOTE: Ridout suggests using eps**(1/4)*theta
    h = _get_epsilon(x, 3, epsilon, n)
    ee = np.diag(h)
    f0 = f(*((x,)+args), **kwargs)
    # Compute forward step
    g = np.zeros(n)
    gg = np.zeros(n)
    for i in range(n):
        g[i] = f(*((x+ee[i, :],)+args), **kwargs)
        gg[i] = f(*((x-ee[i, :],)+args), **kwargs)

    hess = np.outer(h, h)  # this is now epsilon**2
    # Compute "double" forward step
    for i in range(n):
        for j in range(i, n):
            hess[i, j] = (f(*((x + ee[i, :] + ee[j, :],) + args), **kwargs) -
                          g[i] - g[j] + f0 +
                          f(*((x - ee[i, :] - ee[j, :],) + args), **kwargs) -
                          gg[i] - gg[j] + f0)/(2 * hess[i, j])
            hess[j, i] = hess[i, j]
    if return_grad:
        grad = (g - f0)/h
        return hess, grad
    else:
        return hess
Developer: haribharadwaj, Project: statsmodels, Lines: 28, Source: numdiff.py
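
A small usage sketch of the finite-difference Hessian above, applied to a quadratic form whose Hessian is known exactly. The import path statsmodels.tools.numdiff matches the quoted source file (numdiff.py) but should be treated as an assumption about the installed version:

import numpy as np
from statsmodels.tools.numdiff import approx_hess2

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])

def quad(x):
    # f(x) = 0.5 * x' A x, so the exact Hessian is A everywhere
    return 0.5 * x @ A @ x

H = approx_hess2(np.array([0.5, -1.0]), quad)
print(np.round(H, 4))  # should be close to A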


Example 14: initialize

    def initialize(self, model):

        super(GlobalOddsRatio, self).initialize(model)

        if self.model.weights is not None:
            warnings.warn("weights not implemented for GlobalOddsRatio "
                          "cov_struct, using unweighted covariance estimate",
                          NotImplementedWarning)

        # Need to restrict to between-subject pairs
        cpp = []
        for v in model.endog_li:

            # Number of subjects in this group
            m = int(len(v) / self._ncut)
            i1, i2 = np.tril_indices(m, -1)

            cpp1 = {}
            for k1 in range(self._ncut):
                for k2 in range(k1 + 1):
                    jj = np.zeros((len(i1), 2), dtype=np.int64)
                    jj[:, 0] = i1 * self._ncut + k1
                    jj[:, 1] = i2 * self._ncut + k2
                    cpp1[(k2, k1)] = jj

            cpp.append(cpp1)

        self.cpp = cpp

        # Initialize the dependence parameters
        self.crude_or = self.observed_crude_oddsratio()
        if self.model.update_dep:
            self.dep_params = self.crude_or
Developer: Bonfils-ebu, Project: statsmodels, Lines: 33, Source: cov_struct.py


Example 15: make_lag_names

def make_lag_names(names, lag_order, trendorder=1, exog=None):
    """
    Produce list of lag-variable names. Constant / trends go at the beginning

    Examples
    --------
    >>> make_lag_names(['foo', 'bar'], 2, 1)
    ['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']

    """
    lag_names = []
    if isinstance(names, string_types):
        names = [names]

    # take care of lagged endogenous names
    for i in range(1, lag_order + 1):
        for name in names:
            if not isinstance(name, string_types):
                name = str(name) # will need consistent unicode handling
            lag_names.append('L'+str(i)+'.'+name)

    # handle the constant name
    if trendorder != 0:
        lag_names.insert(0, 'const')
    if trendorder > 1:
        lag_names.insert(1, 'trend')
    if trendorder > 2:
        lag_names.insert(2, 'trend**2')
    if exog is not None:
        for i in range(exog.shape[1]):
            lag_names.insert(trendorder + i, "exog" + str(i))
    return lag_names
Developer: statsmodels, Project: statsmodels, Lines: 32, Source: util.py


Example 16: plot_full_acorr

def plot_full_acorr(acorr, fontsize=8, linewidth=8, xlabel=None,
                    err_bound=None):
    """

    Parameters
    ----------



    """
    import matplotlib.pyplot as plt

    config = MPLConfigurator()
    config.set_fontsize(fontsize)

    k = acorr.shape[1]
    fig, axes = plt.subplots(k, k, figsize=(10, 10), squeeze=False)

    for i in range(k):
        for j in range(k):
            ax = axes[i][j]
            acorr_plot(acorr[:, i, j], linewidth=linewidth,
                       xlabel=xlabel, ax=ax)

            if err_bound is not None:
                ax.axhline(err_bound, color='k', linestyle='--')
                ax.axhline(-err_bound, color='k', linestyle='--')

    adjust_subplots()
    config.revert()

    return fig
Developer: ChadFulton, Project: statsmodels, Lines: 32, Source: plotting.py


Example 17: _band2array

def _band2array(a, lower=0, symmetric=False, hermitian=False):
    """
    Take an upper or lower triangular banded matrix and return a
    numpy array.

    INPUTS:
       a         -- a matrix in upper or lower triangular banded matrix
       lower     -- is the matrix upper or lower triangular?
       symmetric -- if True, return the original result plus its transpose
       hermitian -- if True (and symmetric False), return the original
                    result plus its conjugate transposed

    """

    n = a.shape[1]
    r = a.shape[0]
    _a = 0

    if not lower:
        for j in range(r):
            _b = np.diag(a[r-1-j],k=j)[j:(n+j),j:(n+j)]
            _a += _b
            if symmetric and j > 0: _a += _b.T
            elif hermitian and j > 0: _a += _b.conjugate().T
    else:
        for j in range(r):
            _b = np.diag(a[j],k=j)[0:n,0:n]
            _a += _b
            if symmetric and j > 0: _a += _b.T
            elif hermitian and j > 0: _a += _b.conjugate().T
        _a = _a.T

    return _a
Developer: 0ceangypsy, Project: statsmodels, Lines: 33, Source: bspline.py


Example 18: varsim

def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
    """
    Simulate VAR(p) process, given coefficients and assuming Gaussian noise

    Parameters
    ----------
    coefs : ndarray
        Coefficients for the VAR lags of endog.
    intercept : None or ndarray 1-D (neqs,) or (steps, neqs)
        This can be either the intercept for each equation or an offset.
        If None, then the VAR process has a zero intercept.
        If intercept is 1-D, then the same (endog specific) intercept is added
        to all observations.
        If intercept is 2-D, then it is treated as an offset and is added as
        an observation specific intercept to the autoregression. In this case,
        the intercept/offset should have same number of rows as steps, and the
        same number of columns as endogenous variables (neqs).
    sig_u : ndarray
        Covariance matrix of the residuals or innovations.
        If sig_u is None, then an identity matrix is used.
    steps : None or int
        number of observations to simulate; this includes the initial
        observations to start the autoregressive process.
        If offset is not None, then exog of the model are used if they were
        provided in the model
    seed : None or integer
        If seed is not None, then it will be used with for the random
        variables generated by numpy.random.

    Returns
    -------
    endog_simulated : nd_array
        Endog of the simulated VAR process

    """
    rs = np.random.RandomState(seed=seed)
    rmvnorm = rs.multivariate_normal
    p, k, k = coefs.shape
    if sig_u is None:
        sig_u = np.eye(k)
    ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
    result = np.zeros((steps, k))
    if intercept is not None:
        # intercept can be 2-D like an offset variable
        if np.ndim(intercept) > 1:
            if not len(intercept) == len(ugen):
                raise ValueError('2-D intercept needs to have length `steps`')
        # also add the intercept/offset to the initial values
        result += intercept
        result[p:] += ugen[p:]
    else:
        result[p:] = ugen[p:]

    # add in AR terms
    for t in range(p, steps):
        ygen = result[t]
        for j in range(p):
            ygen += np.dot(coefs[j], result[t-j-1])

    return result
Developer: statsmodels, Project: statsmodels, Lines: 60, Source: util.py
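
A short driver for varsim, as a minimal sketch: the coefficient matrix is an arbitrary stable bivariate VAR(1) chosen purely for illustration, and the import path statsmodels.tsa.vector_ar.util mirrors the quoted source file (util.py):

import numpy as np
from statsmodels.tsa.vector_ar.util import varsim

coefs = np.array([[[0.5, 0.1],
                   [0.0, 0.4]]])      # shape (p, k, k): one lag, two equations
intercept = np.array([1.0, -0.5])     # one intercept per equation
sig_u = 0.1 * np.eye(2)               # innovation covariance

sim = varsim(coefs, intercept, sig_u, steps=250, seed=12345)
print(sim.shape)  # (250, 2)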


Example 19: product_func

                    def product_func(value, d1=d1, d2=d2):

                        out = []
                        for r in range(d1):
                            for s in range(d2):
                                out.append(value[r] * value[d1+s])
                        return np.array(out)
Developer: statsmodels, Project: statsmodels, Lines: 7, Source: formula.py


Example 20: get_columns

    def get_columns(self, *args, **kw):
        """
        Calling function for factor instance.
        """

        v = self.namespace[self._name]
        while True:
            if callable(v):
                if isinstance(v, (Term, Formula)):
                    v = copy.copy(v)
                    v.namespace = self.namespace
                v = v(*args, **kw)
            else: break

        n = len(v)

        if self.ordinal:
            col = [float(self.keys.index(v[i])) for i in range(n)]
            return np.array(col)

        else:
            value = []
            for key in self.keys:
                col = [float((v[i] == key)) for i in range(n)]
                value.append(col)
            return np.array(value)
Developer: statsmodels, Project: statsmodels, Lines: 26, Source: formula.py



Note: the statsmodels.compat.python.range examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors; copyright in the source code remains with those authors, and redistribution or use should follow the corresponding project's license. Please do not republish without permission.

