This article brings together typical usage examples of the Python function statsmodels.compat.python.lrange. If you have been wondering what exactly lrange does, how it is called, or what real code that uses it looks like, the hand-picked examples below should help.
Twenty code examples of lrange are shown below, sorted by popularity by default.
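Before turning to the examples, a word on what the helper is. lrange is a small Python 2/3 compatibility shim: on Python 3 it materializes range() into a list so the result can be indexed, popped, or handed to APIs that expect a concrete sequence. The snippet below is a minimal sketch of that behaviour for orientation only, not the verbatim statsmodels implementation:

def lrange(*args, **kwargs):
    # equivalent to list(range(...)); on Python 2 the original helper can
    # simply return range(...) since that already yields a list there
    return list(range(*args, **kwargs))

print(lrange(5))      # [0, 1, 2, 3, 4]
print(lrange(1, 8))   # [1, 2, 3, 4, 5, 6, 7]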
Example 1: test_wls_example

def test_wls_example():
    # example from the docstring; there was a note about a bug, should
    # be fixed now
    Y = [1, 3, 4, 5, 2, 3, 4]
    X = lrange(1, 8)
    X = add_constant(X, prepend=False)
    wls_model = WLS(Y, X, weights=lrange(1, 8)).fit()
    # reference values taken from R lm.summary
    assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
    assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)

Developer: NanoResearch, Project: statsmodels, Lines: 10, Source: test_regression.py
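For readers on Python 3 only, the same fit can be set up without the compat helper by building the lists directly; the sketch below assumes only the public statsmodels.api entry points, and the commented values just restate the reference numbers from the test above:

import statsmodels.api as sm

Y = [1, 3, 4, 5, 2, 3, 4]
X = sm.add_constant(list(range(1, 8)), prepend=False)   # list(range(1, 8)) plays the role of lrange(1, 8)
wls_model = sm.WLS(Y, X, weights=list(range(1, 8))).fit()
print(wls_model.fvalue)   # ~0.127337843215, per the assertion above
print(wls_model.scale)    # ~2.44608530786**2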
Example 2: test_arma_order_select_ic

def test_arma_order_select_ic():
    # smoke test, assumes info-criteria are right
    from statsmodels.tsa.arima_process import arma_generate_sample

    arparams = np.array([.75, -.25])
    maparams = np.array([.65, .35])
    arparams = np.r_[1, -arparams]
    maparam = np.r_[1, maparams]
    nobs = 250
    np.random.seed(2014)
    y = arma_generate_sample(arparams, maparams, nobs)
    res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    # regression tests in case we change algorithm to minic in sas
    aic_x = np.array([[np.nan, 552.7342255, 484.29687843],
                      [562.10924262, 485.5197969, 480.32858497],
                      [507.04581344, 482.91065829, 481.91926034],
                      [484.03995962, 482.14868032, 483.86378955],
                      [481.8849479, 483.8377379, 485.83756612]])
    bic_x = np.array([[np.nan, 559.77714733, 494.86126118],
                      [569.15216446, 496.08417966, 494.41442864],
                      [517.61019619, 496.99650196, 499.52656493],
                      [498.12580329, 499.75598491, 504.99255506],
                      [499.49225249, 504.96650341, 510.48779255]])
    aic = DataFrame(aic_x, index=lrange(5), columns=lrange(3))
    bic = DataFrame(bic_x, index=lrange(5), columns=lrange(3))
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_almost_equal(res.bic.values, bic.values, 5)
    assert_equal(res.aic_min_order, (1, 2))
    assert_equal(res.bic_min_order, (1, 2))
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_(res.bic.index.equals(bic.index))
    assert_(res.bic.columns.equals(bic.columns))

    index = pd.date_range('2000-1-1', freq='M', periods=len(y))
    y_series = pd.Series(y, index=index)
    res_pd = arma_order_select_ic(y_series, max_ar=2, max_ma=1,
                                  ic=['aic', 'bic'], trend='nc')
    assert_almost_equal(res_pd.aic.values, aic.values[:3, :2], 5)
    assert_almost_equal(res_pd.bic.values, bic.values[:3, :2], 5)
    assert_equal(res_pd.aic_min_order, (2, 1))
    assert_equal(res_pd.bic_min_order, (1, 1))

    res = arma_order_select_ic(y, ic='aic', trend='nc')
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_equal(res.aic_min_order, (1, 2))

Developer: haribharadwaj, Project: statsmodels, Lines: 48, Source: test_stattools.py
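The only job lrange does in this test is to give the expected-value tables concrete integer row and column labels, matching what arma_order_select_ic returns. A stand-alone illustration (the zeros are placeholders, not real information criteria):

import numpy as np
from pandas import DataFrame

table = DataFrame(np.zeros((5, 3)), index=list(range(5)), columns=list(range(3)))
print(table.index.tolist())     # [0, 1, 2, 3, 4]
print(table.columns.tolist())   # [0, 1, 2]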
Example 3: print_ic_table

def print_ic_table(ics, selected_orders):
    """
    For VAR order selection
    """
    # Can factor this out into a utility method if so desired
    cols = sorted(ics)

    data = mat([["%#10.4g" % v for v in ics[c]] for c in cols],
               dtype=object).T

    # star the minimums
    for i, col in enumerate(cols):
        idx = int(selected_orders[col]), i
        data[idx] = data[idx] + '*'
        # data[idx] = data[idx][:-1] + '*'  # super hack, ugh

    fmt = dict(_default_table_fmt,
               data_fmts=("%s",) * len(cols))

    buf = StringIO()
    table = SimpleTable(data, cols, lrange(len(data)),
                        title='VAR Order Selection', txt_fmt=fmt)
    buf.write(str(table) + '\n')
    buf.write('* Minimum' + '\n')
    print(buf.getvalue())

Developer: 0ceangypsy, Project: statsmodels, Lines: 28, Source: output.py
Example 4: _plot_leverage_resid2

def _plot_leverage_resid2(results, influence, alpha=.05, ax=None,
                          **kwargs):
    from scipy.stats import zscore, norm
    fig, ax = utils.create_mpl_ax(ax)

    infl = influence
    leverage = infl.hat_matrix_diag
    resid = zscore(infl.resid)
    ax.plot(resid**2, leverage, 'o', **kwargs)
    ax.set_xlabel("Normalized residuals**2")
    ax.set_ylabel("Leverage")
    ax.set_title("Leverage vs. Normalized residuals squared")

    large_leverage = leverage > _high_leverage(results)
    # norm or t here if standardized?
    cutoff = norm.ppf(1. - alpha / 2)
    large_resid = np.abs(resid) > cutoff
    labels = results.model.data.row_labels
    if labels is None:
        labels = lrange(int(results.nobs))
    index = np.where(np.logical_or(large_leverage, large_resid))[0]
    ax = utils.annotate_axes(index, labels, lzip(resid**2, leverage),
                             [(0, 5)] * int(results.nobs), "large",
                             ax=ax, ha="center", va="bottom")
    ax.margins(.075, .075)
    return fig

Developer: ChadFulton, Project: statsmodels, Lines: 27, Source: regressionplots.py
Example 5: _make_predict_dates

def _make_predict_dates(self):
    data = self.data
    dtstart = data.predict_start
    dtend = data.predict_end
    freq = data.freq

    if freq is not None:
        pandas_freq = _freq_to_pandas[freq]
        try:
            from pandas import DatetimeIndex
            dates = DatetimeIndex(start=dtstart, end=dtend,
                                  freq=pandas_freq)
        except ImportError as err:
            from pandas import DateRange
            dates = DateRange(dtstart, dtend, offset=pandas_freq).values
    # handle integer start/end points when there is no frequency
    elif freq is None and (isinstance(dtstart, int) and
                           isinstance(dtend, int)):
        from pandas import Index
        dates = Index(lrange(dtstart, dtend + 1))
    # if freq is None and dtstart and dtend aren't integers, we're
    # in sample
    else:
        dates = self.data.dates
    start = self._get_dates_loc(dates, dtstart)
    end = self._get_dates_loc(dates, dtend)
    dates = dates[start:end + 1]  # is this index inclusive?
    self.data.predict_dates = dates

Developer: 5267, Project: statsmodels, Lines: 28, Source: tsa_model.py
Example 6: maineffect_func

def maineffect_func(value, reference=reference):
    rvalue = []
    keep = lrange(value.shape[0])
    keep.pop(reference)
    for i in range(len(keep)):
        rvalue.append(value[keep[i]] - value[reference])
    return np.array(rvalue)

Developer: statsmodels, Project: statsmodels, Lines: 7, Source: formula.py
Example 7: _make_predict_dates

def _make_predict_dates(self):
    data = self.data
    dtstart = data.predict_start
    dtend = data.predict_end
    freq = data.freq

    if freq is not None:
        pandas_freq = _freq_to_pandas[freq]
        # preserve PeriodIndex or DatetimeIndex
        dates = self.data.dates.__class__(start=dtstart,
                                          end=dtend,
                                          freq=pandas_freq)
    # handle integer start/end points when there is no frequency
    elif freq is None and (isinstance(dtstart, (int, long)) and
                           isinstance(dtend, (int, long))):
        from pandas import Index
        dates = Index(lrange(dtstart, dtend + 1))
    # if freq is None and dtstart and dtend aren't integers, we're
    # in sample
    else:
        dates = self.data.dates
    start = self._get_dates_loc(dates, dtstart)
    end = self._get_dates_loc(dates, dtend)
    dates = dates[start:end + 1]  # is this index inclusive?
    self.data.predict_dates = dates

Developer: Inoryy, Project: statsmodels, Lines: 25, Source: tsa_model.py
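In the integer branch of both versions above, lrange simply produces the inclusive run of prediction points that gets wrapped in a pandas Index. A small stand-alone sketch with made-up start and end values:

from pandas import Index

dtstart, dtend = 10, 14   # hypothetical prediction bounds
dates = Index(list(range(dtstart, dtend + 1)))
print(dates.tolist())     # [10, 11, 12, 13, 14]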
Example 8: test_pickle

def test_pickle():
    import tempfile
    from numpy.testing import assert_equal

    tmpdir = tempfile.mkdtemp(prefix='pickle')
    a = lrange(10)
    save_pickle(a, tmpdir + '/res.pkl')
    b = load_pickle(tmpdir + '/res.pkl')
    assert_equal(a, b)

    # cleanup, tested on Windows
    try:
        import os
        os.remove(tmpdir + '/res.pkl')
        os.rmdir(tmpdir)
    except (OSError, IOError):
        pass
    assert not os.path.exists(tmpdir)

    # test with file handle
    fh = BytesIO()
    save_pickle(a, fh)
    fh.seek(0, 0)
    c = load_pickle(fh)
    fh.close()
    assert_equal(a, c)  # compare against the value loaded from the handle

Developer: 0ceangypsy, Project: statsmodels, Lines: 25, Source: test_pickle.py
Example 9: plot_with_error

def plot_with_error(y, error, x=None, axes=None, value_fmt='k',
                    error_fmt='k--', alpha=0.05, stderr_type='asym'):
    """
    Make plot with optional error bars

    Parameters
    ----------
    y :
    error : array or None
    """
    import matplotlib.pyplot as plt

    if axes is None:
        axes = plt.gca()

    x = x if x is not None else lrange(len(y))
    plot_action = lambda y, fmt: axes.plot(x, y, fmt)
    plot_action(y, value_fmt)

    # changed this
    if error is not None:
        if stderr_type == 'asym':
            q = util.norm_signif_level(alpha)
            plot_action(y - q * error, error_fmt)
            plot_action(y + q * error, error_fmt)
        if stderr_type in ('mc', 'sz1', 'sz2', 'sz3'):
            plot_action(error[0], error_fmt)
            plot_action(error[1], error_fmt)

Developer: ChadFulton, Project: statsmodels, Lines: 29, Source: plotting.py
Example 10: irf_grid_plot

def irf_grid_plot(values, stderr, impcol, rescol, names, title,
                  signif=0.05, hlines=None, subplot_params=None,
                  plot_params=None, figsize=(10, 10), stderr_type='asym'):
    """
    Reusable function to make flexible grid plots of impulse responses and
    cumulative effects

    values : (T + 1) x k x k
    stderr : T x k x k
    hlines : k x k
    """
    import matplotlib.pyplot as plt

    if subplot_params is None:
        subplot_params = {}
    if plot_params is None:
        plot_params = {}

    nrows, ncols, to_plot = _get_irf_plot_config(names, impcol, rescol)

    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
                             squeeze=False, figsize=figsize)

    # fill out space
    adjust_subplots()

    fig.suptitle(title, fontsize=14)

    subtitle_temp = r'%s$\rightarrow$%s'

    k = len(names)
    rng = lrange(len(values))
    for (j, i, ai, aj) in to_plot:
        ax = axes[ai][aj]

        # HACK?
        if stderr is not None:
            if stderr_type == 'asym':
                sig = np.sqrt(stderr[:, j * k + i, j * k + i])
                plot_with_error(values[:, i, j], sig, x=rng, axes=ax,
                                alpha=signif, value_fmt='b',
                                stderr_type=stderr_type)
            if stderr_type in ('mc', 'sz1', 'sz2', 'sz3'):
                errs = stderr[0][:, i, j], stderr[1][:, i, j]
                plot_with_error(values[:, i, j], errs, x=rng, axes=ax,
                                alpha=signif, value_fmt='b',
                                stderr_type=stderr_type)
        else:
            plot_with_error(values[:, i, j], None, x=rng, axes=ax,
                            value_fmt='b')

        ax.axhline(0, color='k')

        if hlines is not None:
            ax.axhline(hlines[i, j], color='k')

        sz = subplot_params.get('fontsize', 12)
        ax.set_title(subtitle_temp % (names[j], names[i]), fontsize=sz)

    return fig

Developer: ChadFulton, Project: statsmodels, Lines: 59, Source: plotting.py
Example 11: _maybe_reset_index

def _maybe_reset_index(data):
    """
    All the Rdatasets have the integer row.labels from R if there is no
    real index. Strip this for a zero-based index
    """
    if data.index.equals(Index(lrange(1, len(data) + 1))):
        data = data.reset_index(drop=True)
    return data

Developer: BranYang, Project: statsmodels, Lines: 8, Source: utils.py
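The comparison above is against R's 1-based row labels; the toy frame below (hypothetical data) shows the same check and reset in isolation:

import pandas as pd
from pandas import Index

data = pd.DataFrame({"x": [10, 20, 30]}, index=[1, 2, 3])   # R-style 1-based labels
if data.index.equals(Index(list(range(1, len(data) + 1)))):
    data = data.reset_index(drop=True)
print(data.index.tolist())   # [0, 1, 2]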
Example 12: variables

def variables(self):
    """
    Returns a list of the dataset's StataVariables objects.
    """
    return lmap(_StataVariable, zip(lrange(self._header['nvar']),
                                    self._header['typlist'],
                                    self._header['varlist'],
                                    self._header['srtlist'],
                                    self._header['fmtlist'],
                                    self._header['lbllist'],
                                    self._header['vlblist']))

Developer: statsmodels, Project: statsmodels, Lines: 9, Source: foreign.py
Example 13: __iter__

def __iter__(self):
    n = self.n
    p = self.p
    comb = combinations(lrange(n), p)
    for idx in comb:
        test_index = np.zeros(n, dtype=np.bool)
        test_index[np.array(idx)] = True
        train_index = np.logical_not(test_index)
        yield train_index, test_index

Developer: ChadFulton, Project: statsmodels, Lines: 9, Source: cross_val.py
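The iterator above walks every leave-p-out split of n observations; the boolean-mask construction can be reproduced without the surrounding class (the values of n and p here are illustrative):

import numpy as np
from itertools import combinations

n, p = 4, 2
for idx in combinations(range(n), p):
    test_index = np.zeros(n, dtype=bool)   # np.bool is simply the builtin bool in modern NumPy
    test_index[np.array(idx)] = True
    train_index = np.logical_not(test_index)
    print(train_index.astype(int), test_index.astype(int))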
Example 14: test__reduce_dict

def test__reduce_dict():
    data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
    eq(_reduce_dict(data, ('m',)), 4)
    eq(_reduce_dict(data, ('m', 'o')), 2)
    eq(_reduce_dict(data, ('m', 'o', 'w')), 1)

    data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
    eq(_reduce_dict(data, ('m',)), 6)
    eq(_reduce_dict(data, ('m', 'o')), 1)
    eq(_reduce_dict(data, ('m', 'o', 'w')), 0)

Developer: cong1989, Project: statsmodels, Lines: 9, Source: test_mosaicplot.py
Example 15: date_range_str

def date_range_str(start, end=None, length=None):
    """
    Returns a list of abbreviated date strings.

    Parameters
    ----------
    start : str
        The first abbreviated date, for instance, '1965q1' or '1965m1'
    end : str, optional
        The last abbreviated date if length is None.
    length : int, optional
        The length of the returned array if end is None.

    Returns
    -------
    date_range : list
        List of strings
    """
    flags = re.IGNORECASE | re.VERBOSE
    #_check_range_inputs(end, length, freq)
    start = start.lower()
    if re.search(_m_pattern, start, flags):
        annual_freq = 12
        split = 'm'
    elif re.search(_q_pattern, start, flags):
        annual_freq = 4
        split = 'q'
    elif re.search(_y_pattern, start, flags):
        annual_freq = 1
        start += 'a1'  # hack
        if end:
            end += 'a1'
        split = 'a'
    else:
        raise ValueError("Date %s not understood" % start)

    yr1, offset1 = lmap(int, start.replace(":", "").split(split))
    if end is not None:
        end = end.lower()
        yr2, offset2 = lmap(int, end.replace(":", "").split(split))
        length = (yr2 - yr1) * annual_freq + offset2
    elif length:
        yr2 = yr1 + length // annual_freq
        offset2 = length % annual_freq + (offset1 - 1)

    years = np.repeat(lrange(yr1 + 1, yr2), annual_freq).tolist()
    years = np.r_[[str(yr1)] * (annual_freq + 1 - offset1), years]  # tack on first year
    years = np.r_[years, [str(yr2)] * offset2]  # tack on last year

    if split != 'a':
        offset = np.tile(np.arange(1, annual_freq + 1), yr2 - yr1 - 1)
        offset = np.r_[np.arange(offset1, annual_freq + 1).astype('a2'), offset]
        offset = np.r_[offset, np.arange(1, offset2 + 1).astype('a2')]
        date_arr_range = [''.join([i, split, asstr(j)]) for i, j in
                          zip(years, offset)]
    else:
        date_arr_range = years.tolist()

    return date_arr_range

Developer: Inoryy, Project: statsmodels, Lines: 55, Source: datetools.py
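Tracing the branches above, a quarterly start with length=6 should expand into six consecutive labels. The call below is a sketch that assumes date_range_str is importable from statsmodels.tsa.base.datetools (where this source file lived at the time); the expected output is read off the logic above rather than verified against a run:

from statsmodels.tsa.base.datetools import date_range_str   # import path is an assumption

labels = date_range_str('1965q1', length=6)
# expected, following the branch logic above:
# ['1965q1', '1965q2', '1965q3', '1965q4', '1966q1', '1966q2']
print(labels)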
Example 16: interactions

def interactions(terms, order=[1, 2]):
    """
    Output all pairwise interactions of given order of a
    sequence of terms.

    The argument order is a sequence specifying which order
    of interactions should be generated -- the default
    creates main effects and two-way interactions. If order
    is an integer, it is changed to range(1, order+1), so
    order=3 is equivalent to order=[1, 2, 3], generating
    all one, two and three-way interactions.

    If any entry of order is greater than len(terms), it is
    effectively treated as len(terms).

    >>> print interactions([Term(l) for l in ['a', 'b', 'c']])
    <formula: a*b + a*c + b*c + a + b + c>
    >>>
    >>> print interactions([Term(l) for l in ['a', 'b', 'c']], order=list(range(5)))
    <formula: a*b + a*b*c + a*c + b*c + a + b + c>
    >>>
    """
    l = len(terms)
    values = {}

    if np.asarray(order).shape == ():
        order = lrange(1, int(order) + 1)

    # First order
    for o in order:
        I = np.indices((l,) * (o))
        I.shape = (I.shape[0], np.product(I.shape[1:]))
        for m in range(I.shape[1]):
            # only keep combinations that have unique entries
            if (np.unique(I[:, m]).shape == I[:, m].shape and
                    np.alltrue(np.equal(np.sort(I[:, m]), I[:, m]))):
                ll = [terms[j] for j in I[:, m]]
                v = ll[0]
                for ii in range(len(ll) - 1):
                    v *= ll[ii + 1]
                values[tuple(I[:, m])] = v

    key = list(iterkeys(values))[0]
    value = values[key]
    del(values[key])

    for v in itervalues(values):
        value += v

    return value

Developer: statsmodels, Project: statsmodels, Lines: 54, Source: formula.py
Example 17: summary

def summary(self):
    buf = StringIO()

    rng = lrange(self.periods)
    for i in range(self.neqs):
        ppm = output.pprint_matrix(self.decomp[i], rng, self.names)

        buf.write('FEVD for %s\n' % self.names[i])
        buf.write(ppm + '\n')

    print(buf.getvalue())

Developer: bert9bert, Project: statsmodels, Lines: 11, Source: var_model.py
Example 18: check_index

def check_index(self, is_sorted=True, unique=True, index=None):
    """Sanity checks"""
    if not index:
        index = self.index
    if is_sorted:
        test = pd.DataFrame(lrange(len(index)), index=index)
        test_sorted = test.sort()
        if not test.index.equals(test_sorted.index):
            raise Exception('Data is not sorted')
    if unique:
        if len(index) != len(index.unique()):
            raise Exception('Duplicate index entries')

Developer: cong1989, Project: statsmodels, Lines: 12, Source: grouputils.py
Example 19: cat2dummy

def cat2dummy(y, nonseq=0):
    if nonseq or (y.ndim == 2 and y.shape[1] > 1):
        ycat, uniques, unitransl = convertlabels(y, lrange(y.shape[1]))
    else:
        ycat = y.copy()
        ymin = y.min()
        uniques = np.arange(ymin, y.max() + 1)
    if ycat.ndim == 1:
        ycat = ycat[:, np.newaxis]
    # this builds the nobs x ncat matrix
    dummy = (ycat == uniques).astype(int)
    return dummy

Developer: Cassin123, Project: statsmodels, Lines: 12, Source: try_catdata.py
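The key step is the broadcast comparison that builds the nobs x ncat indicator matrix; here it is in isolation with a hypothetical label vector:

import numpy as np

y = np.array([0, 2, 1, 0])                    # hypothetical category labels
uniques = np.arange(y.min(), y.max() + 1)     # array([0, 1, 2])
dummy = (y[:, np.newaxis] == uniques).astype(int)
print(dummy)
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]
#  [1 0 0]]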
Example 20: _influence_plot

def _influence_plot(results, influence, external=True, alpha=.05,
                    criterion="cooks", size=48, plot_alpha=.75, ax=None,
                    **kwargs):
    infl = influence
    fig, ax = utils.create_mpl_ax(ax)

    if criterion.lower().startswith('coo'):
        psize = infl.cooks_distance[0]
    elif criterion.lower().startswith('dff'):
        psize = np.abs(infl.dffits[0])
    else:
        raise ValueError("Criterion %s not understood" % criterion)

    # scale the variables
    # TODO: what is the correct scaling and the assumption here?
    # we want plots to be comparable across different plots
    # so we would need to use the expected distribution of criterion probably
    old_range = np.ptp(psize)
    new_range = size**2 - 8**2
    psize = (psize - psize.min()) * new_range / old_range + 8**2

    leverage = infl.hat_matrix_diag
    if external:
        resids = infl.resid_studentized_external
    else:
        resids = infl.resid_studentized

    from scipy import stats

    cutoff = stats.t.ppf(1. - alpha / 2, results.df_resid)
    large_resid = np.abs(resids) > cutoff
    large_leverage = leverage > _high_leverage(results)
    large_points = np.logical_or(large_resid, large_leverage)

    ax.scatter(leverage, resids, s=psize, alpha=plot_alpha)

    # add point labels
    labels = results.model.data.row_labels
    if labels is None:
        labels = lrange(len(resids))

    ax = utils.annotate_axes(np.where(large_points)[0], labels,
                             lzip(leverage, resids),
                             lzip(-(psize / 2)**.5, (psize / 2)**.5),
                             "x-large", ax)

    # TODO: make configurable or let people do it ex-post?
    font = {"fontsize": 16, "color": "black"}
    ax.set_ylabel("Studentized Residuals", **font)
    ax.set_xlabel("H Leverage", **font)
    ax.set_title("Influence Plot", **font)
    return fig

Developer: ChadFulton, Project: statsmodels, Lines: 52, Source: regressionplots.py
Note: the statsmodels.compat.python.lrange examples in this article were compiled by 纯净天空 from source-code and documentation hosts such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.