This article collects typical usage examples of the scikits.bootstrap.ci function in Python. If you are wondering what the ci function does, how to call it, or what real code that uses it looks like, the curated examples below should help.
Twenty code examples of the ci function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
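Before the examples, a minimal sketch of the basic call pattern may be useful. The data and variable names below are invented for illustration, and the calls assume NumPy and scikits-bootstrap are installed:

import numpy as np
from scikits import bootstrap

# Invented sample: 500 draws from a normal distribution.
data = np.random.normal(loc=10.0, scale=2.0, size=500)

# Default call: 95% BCa (bias-corrected and accelerated) interval for the mean.
# ci() returns a two-element array [lower bound, upper bound].
low, high = bootstrap.ci(data, statfunction=np.mean)

# Percentile-interval method with a custom alpha and resample count,
# as several of the examples below use.
ci_median = bootstrap.ci(data, np.median, alpha=0.10, n_samples=2000, method='pi')

print(low, high, ci_median)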
Example 1: ScikitsBootstrap

def ScikitsBootstrap(fdf, loc=0, scale=100,
                     leftsigma=5, rightsigma=5, minsamples=100, verbose=1):
    '''
    Parameters from a Gaussian fit are used to clip the total range of the data;
    from this, a BCa bootstrap of the error in loc and scale is computed from the
    MLE estimates (mean and std respectively). This will ONLY work if the data
    given IS Gaussian.
    '''
    # btp, mean and std come from module-level imports in the source file
    # (presumably scikits.bootstrap as btp and numpy's mean/std).
    # fRawData = fdf.Ampl[abs(fdf.Ampl) < 1000]
    fRawData = fdf.Ampl[
        (fdf.Ampl > loc - leftsigma * scale) & (fdf.Ampl < loc + rightsigma * scale)]
    if verbose > 0:
        print("number of samples", len(fRawData))
    if len(fRawData) < minsamples:
        if verbose > 0:
            print("insufficient data")
        return (1e12, 1e12, 1e12)
    CILower, CIUpper = btp.ci(fRawData, std)
    scaleerr = (CIUpper - std(fRawData)) / 1.96
    CILower, CIUpper = btp.ci(fRawData, mean)
    locerr = (CIUpper - mean(fRawData)) / 1.96
    amperr = 0  # currently ignored
    return (locerr, scaleerr, amperr)

Developer: marksbrown | Project: ProcessingCTRData | Lines of code: 25 | Source file: processingcern.py
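A note on the recipe above, which recurs in Examples 2 and 4: the author turns a 95% bootstrap interval into an approximate standard error by dividing the distance from the point estimate to the upper bound by 1.96, which is only a good approximation when the bootstrap distribution is roughly Gaussian and symmetric. A self-contained sketch of the same idea, with invented data and names:

import numpy as np
import scikits.bootstrap as btp

ampl = np.random.normal(loc=0.0, scale=100.0, size=5000)  # stand-in for the clipped fdf.Ampl column

ci_std = btp.ci(ampl, np.std)                   # 95% BCa interval for the scale (std)
scale_err = (ci_std[1] - np.std(ampl)) / 1.96   # approximate standard error of the scale

ci_mean = btp.ci(ampl, np.mean)                 # 95% BCa interval for the location (mean)
loc_err = (ci_mean[1] - np.mean(ampl)) / 1.96   # approximate standard error of the location

print(loc_err, scale_err)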
Example 2: FitToDelayData

def FitToDelayData(
        DelayValues, timerange=1000, GenerateImages=True, verbose=0):
    '''
    Loads cropped data and fits a Gaussian.
    Calculates the error using a bootstrap.
    '''
    freq, binedges = histogram(
        DelayValues, bins=2 * timerange / 25 + 1, range=(-timerange, timerange))
    binedges = 0.5 * (binedges[1:] + binedges[:-1])
    binedges = binedges[freq > 0]
    freq = freq[freq > 0]
    (param, err), chival = normfit(binedges, freq, yerr=sqrt(freq),
                                   ScaleGuess=100, verbose=verbose)  # fit to CTR peak
    p1, p2, p3 = param
    DelayValues = array(DelayValues)
    fRawData = DelayValues[abs(DelayValues) < 500]
    CILower, CIUpper = btp.ci(fRawData, std)
    scaleerr = (CIUpper - std(fRawData)) / 1.96
    CILower, CIUpper = btp.ci(fRawData, mean)
    locerr = (CIUpper - mean(fRawData)) / 1.96
    ##amperr = p3 # currently ignored
    p1err, p2err, p3err = err.diagonal()
    return param, (locerr, scaleerr, p3err)

Developer: marksbrown | Project: ProcessingCTRData | Lines of code: 30 | Source file: processingcern.py
Example 3: fit_learning_curve

def fit_learning_curve(data, length=10, user_length=None, context_answer_limit=100, reverse=False, bootstrap_samples=100):
    # Note: Python 2 code -- it relies on map() returning lists
    # (e.g. len(learning_curve), fit[-1]).
    confidence_vals = [[] for i in range(length)]

    def _fit_learning_curve(series):
        references_by_attempt = map(lambda references: [r for r in references if r is not None], zip(*series))
        learning_curve = map(lambda xs: (numpy.mean(xs), len(xs)), references_by_attempt)

        def _learn_fun(attempt, a, k):
            return a * (1.0 / (attempt + 1) ** k)

        opt, _ = curve_fit(
            _learn_fun,
            numpy.arange(len(learning_curve)),
            numpy.array(map(lambda x: x[0], learning_curve)),
            sigma=numpy.array(map(lambda x: 1.0 / numpy.sqrt(x[1] + 1), learning_curve))
        )
        fit = map(lambda attempt: _learn_fun(attempt, opt[0], opt[1]), range(len(learning_curve)))
        for i, r in enumerate(fit):
            confidence_vals[i].append(r)
        return fit[-1]

    series = reference_series(data, length=length, user_length=user_length,
                              context_answer_limit=context_answer_limit, reverse=reverse)
    try:
        # The ci() call is used for its side effect: each bootstrap resample
        # appends a fitted curve into confidence_vals via _fit_learning_curve.
        bootstrap.ci(series, _fit_learning_curve, method='pi', n_samples=bootstrap_samples)

        def _aggr(rs):
            return {
                'value': numpy.median(rs),
                'confidence_interval_min': numpy.percentile(rs, 2),
                'confidence_interval_max': numpy.percentile(rs, 98),
            }
        return map(_aggr, confidence_vals)
    except:
        return []

Developer: papousek | Project: slepemapy-learning-curves | Lines of code: 35 | Source file: ab_random_random_3.py
Example 4: ScikitsBootstrap

def ScikitsBootstrap(fdf):
    CILower, CIUpper = btp.ci(fdf.counts, std)
    scaleerr = (CIUpper - std(fdf.counts)) / 1.96
    CILower, CIUpper = btp.ci(fdf.counts, mean)
    locerr = (CIUpper - mean(fdf.counts)) / 1.96
    amperr = 0  # currently ignored
    return (locerr, scaleerr, amperr)

Developer: marksbrown | Project: ProcessingCTRData | Lines of code: 8 | Source file: lightyield.py
Example 5: test_pi_multi_2dout_multialpha

def test_pi_multi_2dout_multialpha(self):
    np.random.seed(1234567890)
    results1 = boot.ci((self.x, self.y), stats.linregress, alpha=(0.1, 0.2, 0.8, 0.9), n_samples=2000, method='pi')
    np.random.seed(1234567890)
    results2 = boot.ci(np.vstack((self.x, self.y)).T, lambda a: stats.linregress(a)[0], alpha=(0.1, 0.2, 0.8, 0.9), n_samples=2000, method='pi')
    np.random.seed(1234567890)
    results3 = boot.ci(np.vstack((self.x, self.y)).T, lambda a: stats.linregress(a)[1], alpha=(0.1, 0.2, 0.8, 0.9), n_samples=2000, method='pi')
    np.testing.assert_array_almost_equal(results1[:, 0], results2)
    np.testing.assert_array_almost_equal(results1[:, 1], results3)

Developer: fspaolo | Project: scikits-bootstrap | Lines of code: 9 | Source file: test_bootstrap.py
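This test from the library's own suite illustrates two behaviors that several wrappers on this page rely on: a tuple of arrays is resampled together and passed to the statistic as separate arguments, and an iterable alpha is treated as the set of percentile points to return (rather than a single two-sided level), giving a 2-D result with one row per alpha and one column per output of the statistic. A small sketch along the same lines, with invented data:

import numpy as np
from scipy import stats
import scikits.bootstrap as boot

x = np.linspace(0.0, 1.0, 50)
y = 2.0 * x + np.random.normal(scale=0.1, size=50)

# (x, y) is resampled pairwise and handed to stats.linregress(x_sample, y_sample).
# linregress returns five values and four alphas are requested, so `out` has
# one row per alpha and one column per returned value (here 4 x 5).
out = boot.ci((x, y), stats.linregress, alpha=(0.1, 0.2, 0.8, 0.9),
              n_samples=2000, method='pi')
slopes_at_percentiles = out[:, 0]   # bootstrap percentiles of the slope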
Example 6: flag_outlier

def flag_outlier(in_vec, thresh_percentage=95):
    """
    Flags an outlier according to a percent difference threshold
    :param thresh_percentage: percent confidence interval
    :param in_vec:
    :return: outlier_ind
    """
    in_vec = np.array(in_vec)
    # find largest outlier (leave-one-out squared residual)
    outlier_ind = 0
    l2_resid_old = 0
    mask = np.ones(len(in_vec), dtype=bool)
    for i in xrange(in_vec.shape[0]):  # Python 2: xrange
        mask[i] = False
        l2_resid = (in_vec[i] - np.mean(in_vec[mask]))**2
        if l2_resid > l2_resid_old:
            outlier_ind = i
            l2_resid_old = l2_resid
        mask[i] = True
    # check if outlier is outside threshold percentage
    # bootstrap a 95% ci from data
    a_lvl = 1 - (thresh_percentage / 100.)
    CIs = bootstrap.ci(data=in_vec, statfunction=mean, alpha=a_lvl)
    if in_vec[outlier_ind] < CIs[0] or in_vec[outlier_ind] > CIs[1]:
        return outlier_ind
    else:
        return None

Developer: mlsamsom | Project: PyFrictionTools | Lines of code: 31 | Source file: utilities.py
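A possible call of the helper above, with an invented vector containing one planted outlier. Note that the function compares the candidate against a confidence interval for the mean, which narrows as the sample grows, so large samples flag outliers more aggressively. The snippet assumes flag_outlier is importable along with its module-level imports (np, mean, bootstrap) and its Python 2 environment:

import numpy as np

np.random.seed(0)
vec = np.concatenate([np.random.normal(1.0, 0.05, size=50), [3.0]])  # one planted outlier

idx = flag_outlier(vec, thresh_percentage=95)   # index of the flagged point, or None
if idx is not None:
    print("outlier at index %d with value %.2f" % (idx, vec[idx]))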
Example 7: forcedChoicePlot

def forcedChoicePlot(listenerAccuracies, listenerScores, mturkAccuracies, mturkScores, outFile, title, errorBars=False):
    """listenerAccuracies is an array of accuracy arrays, one per problem level.
    mturkAccuracies is a 1-d array of mturk accuracies on each problem level.
    """
    matplotlib.rcParams.update({'font.size': 20})
    lw = 4
    plt.hold(True)
    nListeners = len(listenerAccuracies)
    nIterations = len(listenerAccuracies[0]) - 1
    plt.axis([0, nIterations, 0, 1])
    plt.ylabel('Listener Accuracy')
    plt.xlabel('Training Iterations')
    for levelAccuracies, levelScores, lineColor in zip(listenerAccuracies, listenerScores, colors):
        if errorBars:
            yerrs = []
            for scores in levelScores:
                if np.array(scores).all():
                    yerrs.append(0)
                else:
                    interval = boot.ci(np.array(scores), np.average)
                    err = (interval[1] - interval[0]) / 2.0
                    yerrs.append(err)
            plt.errorbar(range(len(levelAccuracies)), levelAccuracies, yerr=yerrs, linewidth=lw, color=lineColor)
            print lineColor   # Python 2 print statement
            print levelAccuracies
        else:
            plt.plot(levelAccuracies, linewidth=lw, marker='o', color=lineColor)
    listenerTitles = ['Level %d' % level for level in range(nListeners)]
    plt.legend(listenerTitles, loc='lower right')
    plt.title(title)
    plt.savefig(outFile, format='pdf')
    plt.show()

Developer: acvogel | Project: discriminative-ibr | Lines of code: 32 | Source file: discrim_ibr.py
Example 8: bootstrapCI

def bootstrapCI(data, statFunc=None, alpha=0.05, nPerms=10000, output='lowhigh', method='pi'):
    """Wrapper around a function in the scikits_bootstrap module:
    https://pypi.python.org/pypi/scikits.bootstrap

    Parameters
    ----------
    data : np.ndarray
        Data for computing the confidence interval.
    statFunc : function
        Should take data and operate along axis=0
    alpha : float
        Returns the [alpha/2, 1-alpha/2] percentile confidence intervals.
    nPerms : int
    output : str
        Use 'lowhigh' or 'errorbar', for matplotlib errorbars"""
    if statFunc is None:
        statFunc = partial(np.nanmean, axis=0)
    try:
        out = ci(data=data, statfunction=statFunc, alpha=alpha, n_samples=nPerms, output='lowhigh', method=method)
    except IndexError:
        shp = list(data.shape)
        shp[0] = 2
        out = np.nan * np.ones(shp)
    if output == 'errorbar':
        mu = statFunc(data)
        shp = list(out.shape)
        out[0, :] = out[0, :] - mu
        out[1, :] = mu - out[1, :]
        out = np.reshape(out, shp)
    return out

Developer: agartland | Project: utils | Lines of code: 32 | Source file: bootstrap_testing.py
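For the 'errorbar' branch, the common downstream use is matplotlib's errorbar, which expects the distances below and above each point estimate rather than absolute bounds. Independent of the wrapper above, here is a sketch of that pattern with invented groups and labels:

import numpy as np
import matplotlib.pyplot as plt
from scikits.bootstrap import ci

groups = {'control': np.random.normal(1.0, 0.3, 40),
          'treated': np.random.normal(1.4, 0.3, 40)}

means, lowers, uppers = [], [], []
for name, values in groups.items():
    low, high = ci(values, np.mean, alpha=0.05, n_samples=5000)
    means.append(np.mean(values))
    lowers.append(np.mean(values) - low)   # distance below the mean
    uppers.append(high - np.mean(values))  # distance above the mean

positions = list(range(len(groups)))
plt.errorbar(positions, means, yerr=[lowers, uppers], fmt='o')
plt.xticks(positions, list(groups.keys()))
plt.show()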
Example 9: stats_per_group

def stats_per_group(x):
    print 'stats-per-group'   # Python 2 print statement
    x = x.groupby(['sid']).mean()
    x = x.value
    print len(x)
    res = {'median': [], 'qtile': []}
    medians = np.median(x)
    res['mean'] = np.average(x)
    res['median'] = medians
    lower_quartile, upper_quartile = np.percentile(x, [25, 75])
    res['qtile'] = (upper_quartile, lower_quartile)
    # res['ci'] = np.percentile(x, [2.5, 97.5])
    iqr = upper_quartile - lower_quartile
    upper_whisker = x[x <= upper_quartile + 1.5 * iqr].max()
    lower_whisker = x[x >= lower_quartile - 1.5 * iqr].min()
    res['whisk'] = (lower_whisker, upper_whisker)
    res['err'] = (np.abs(lower_whisker - medians),
                  np.abs(upper_whisker - medians))
    res['ci'] = bootstrap.ci(x, n_samples=BOOTSTRAP_NUM)
    return pd.Series(res)

Developer: sinkpoint | Project: sagit | Lines of code: 25 | Source file: fiber_stats_viz.py
Example 10: totalNspks

def totalNspks(self):
    """
    Compute statistical comparisons of total nosepokes in the no-inhibition versus inhibition session of NpHR subjects.
    Return a dictionary with means, SEMs, the p-value, and bootstrapped 95 percent CIs.
    """
    totalNspks = {}
    totalNspks['controlMean'] = self.datadict['totalNspksControl']['NoInhib'].mean()
    totalNspks['controlSEM'] = self.datadict['totalNspksControl']['NoInhib'].sem()
    totalNspks['controlCI'] = bootstrap.ci(data=self.datadict['totalNspksControl']['NoInhib'], statfunction=scipy.mean)
    totalNspks['inhibMean'] = self.datadict['totalNspksInhibited']['Inhibited'].mean()
    totalNspks['inhibSEM'] = self.datadict['totalNspksInhibited']['Inhibited'].sem()
    totalNspks['inhibCI'] = bootstrap.ci(data=self.datadict['totalNspksInhibited']['Inhibited'], statfunction=scipy.mean)
    totalNspks['p'] = scipy.stats.ttest_rel(self.datadict['totalNspksControl']['NoInhib'], self.datadict['totalNspksInhibited']['Inhibited'])
    return totalNspks

Developer: sfischweiss | Project: Lab_analysis | Lines of code: 16 | Source file: stats.py
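Examples 10 and 11 follow the same recipe: per-condition mean and SEM, a bootstrapped 95% CI of each mean, and a paired t-test for the difference. Stripped of the project-specific data dictionary, the recipe looks roughly like this; the arrays and key names are invented:

import numpy as np
import scipy.stats
from scikits import bootstrap

no_inhib = np.random.poisson(20, size=12).astype(float)    # hypothetical per-subject counts
inhibited = np.random.poisson(14, size=12).astype(float)

stats_out = {
    'controlMean': no_inhib.mean(),
    'controlSEM': scipy.stats.sem(no_inhib),
    'controlCI': bootstrap.ci(data=no_inhib, statfunction=np.mean),
    'inhibMean': inhibited.mean(),
    'inhibSEM': scipy.stats.sem(inhibited),
    'inhibCI': bootstrap.ci(data=inhibited, statfunction=np.mean),
    'p': scipy.stats.ttest_rel(no_inhib, inhibited),
}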
Example 11: meanNspksInhib

def meanNspksInhib(self):
    """
    Compute statistical comparisons of mean nosepokes in the laser versus simLaser trials of the inhibition session.
    Return a dictionary with means, SEMs, the p-value, and bootstrapped 95 percent CIs.
    """
    meanNspksInhib = {}
    meanNspksInhib['simMean'] = self.datadict['meanNspksInhibited']['simLaser'].mean()
    meanNspksInhib['simSEM'] = self.datadict['meanNspksInhibited']['simLaser'].sem()
    meanNspksInhib['simCI'] = bootstrap.ci(data=self.datadict['meanNspksInhibited']['simLaser'], statfunction=scipy.mean)
    meanNspksInhib['laserMean'] = self.datadict['meanNspksInhibited']['Laser'].mean()
    meanNspksInhib['laserSEM'] = self.datadict['meanNspksInhibited']['Laser'].sem()
    meanNspksInhib['laserCI'] = bootstrap.ci(data=self.datadict['meanNspksInhibited']['Laser'], statfunction=scipy.mean)
    meanNspksInhib['p'] = scipy.stats.ttest_rel(self.datadict['meanNspksInhibited']['simLaser'], self.datadict['meanNspksInhibited']['Laser'])
    return meanNspksInhib

Developer: sfischweiss | Project: Lab_analysis | Lines of code: 16 | Source file: stats.py
Example 12: get_ci

def get_ci(data, ci):
    try:
        ci_vals = bootstrap.ci(data=data, alpha=ci,
                               statfunction=print_class,
                               n_samples=10)
    except:
        ci_vals = [-1.0, 1.0]
    return ci_vals

Developer: ameert | Project: astro_image_processing | Lines of code: 8 | Source file: zpanel_functions.py
Example 13: calc_bootstrap

def calc_bootstrap(data):
    # Calculate the bootstrap
    CIs = bootstrap.ci(data=data, statfunction=sp.mean)
    # Print the data: the "*" unpacks the array CIs into the format arguments
    print('The confidence intervals for the mean are: {0} - {1}'.format(*CIs))
    return CIs

Developer: CeasarSS | Project: books | Lines of code: 8 | Source file: bootstrap.py
Example 14: bootstrap

def bootstrap(self):
    """
    Performs bootstrapping of the F1 measure on the dataset. A narrow confidence interval is more indicative of a sufficient sample size.
    A 95% confidence interval means we are 95% confident that the true F1 measure lies between the low (1) and high (2)
    values returned by the bootstrap library.
    :return:
    """
    data = list(self.algorithm_results.items())
    CIs = bootstrap.ci(data=data, statfunction=self.f1_bootstrap, n_samples=10000)
    print(self.algorithm_name)
    print("Bootstrapped 95% confidence intervals for f1 \nLow:", CIs[0], "\nHigh:", CIs[1])

Developer: a-raina | Project: Event-Detection-using-NLP | Lines of code: 11 | Source file: AlgorithmTester.py
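The statistic passed to ci() here is a method that recomputes F1 from resampled (prediction, label) pairs; its details live elsewhere in the class. As a self-contained illustration of the same idea, here is a hypothetical F1 statistic over an (n, 2) array of binary predictions and labels, with invented data standing in for self.algorithm_results:

import numpy as np
from scikits import bootstrap

def f1_from_pairs(pairs):
    """F1 of binary predictions; pairs is an (n, 2) array of (prediction, label) rows."""
    pred, truth = pairs[:, 0], pairs[:, 1]
    tp = np.sum((pred == 1) & (truth == 1))
    fp = np.sum((pred == 1) & (truth == 0))
    fn = np.sum((pred == 0) & (truth == 1))
    if tp == 0:
        return 0.0
    precision = tp / float(tp + fp)
    recall = tp / float(tp + fn)
    return 2 * precision * recall / (precision + recall)

pairs = np.column_stack([np.random.randint(0, 2, 500), np.random.randint(0, 2, 500)])
low, high = bootstrap.ci(data=pairs, statfunction=f1_from_pairs, n_samples=10000)
print("Bootstrapped 95% CI for F1:", low, high)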
Example 15: calc_bootstrap

def calc_bootstrap(data):
    ''' Find the confidence interval for the mean of the given data set with bootstrapping. '''
    # --- >>> START stats <<< ---
    # Calculate the bootstrap
    CIs = bootstrap.ci(data=data, statfunction=sp.mean)
    # --- >>> STOP stats <<< ---
    # Print the data: the "*" unpacks the array "CIs" into the format arguments
    print(('The confidence intervals for the mean are: {0} - {1}'.format(*CIs)))
    return CIs

Developer: ejmurray | Project: statsintro_python | Lines of code: 12 | Source file: C11_8_bootstrapDemo.py
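A quick way to exercise the helper above, using a synthetic sample whose true mean is known; the printed interval should usually bracket 5.0:

import numpy as np

np.random.seed(42)
sample = np.random.normal(loc=5.0, scale=2.0, size=200)   # true mean is 5.0
bounds = calc_bootstrap(sample)   # prints and returns the 95% CI for the mean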
Example 16: write_data

def write_data(fn, data):
    """Performs descriptive stats and writes stats to output file"""
    f = open(fn, 'w')
    mue, muese = MUE(data)
    f.write("Errors are 95% CIs\n")
    f.write("MUE = %5.3f +/- %5.3f\n" % (mue, muese * 1.96))
    mse, msese = MSE(data)
    f.write("MSE = %5.3f +/- %5.3f\n" % (mse, msese * 1.96))
    correldict = correls(data)
    f.write("R^2 = %3.2f\n" % correldict['r_value']**2)
    f.write("K-Tau = %3.2f\n\n" % correldict['tau'])
    f.write("BOOTSTRAPPED RESULTS (10k resamples, 95% CIs)\n")
    CIs = boot.ci(data, MUE)
    f.write("MUE = %5.3f < %5.3f < %5.3f\n" % (CIs[0][0], mue, CIs[1][0]))
    CIs = boot.ci(data, MSE)
    f.write("MSE = %5.3f < %5.3f < %5.3f\n" % (CIs[0][0], mse, CIs[1][0]))
    CIs = boot.ci(data, correls_for_bootstrap)
    f.write("Pearson's R = %3.2f < %3.2f < %3.2f\n" % (CIs[0][2], correldict['r_value'], CIs[1][2]))
    f.write("R^2 = %3.2f < %3.2f < %3.2f\n" % (CIs[0][3], correldict['r_value']**2, CIs[1][3]))
    f.write("K-Tau = %3.2f < %3.2f < %3.2f\n\n" % (CIs[0][6], correldict['tau'], CIs[1][6]))
    f.close()

Developer: rtb1c13 | Project: scripts | Lines of code: 22 | Source file: analyse_hfe.py
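Example 16 leans on the fact that ci() accepts a statistic returning several values at once: with the default scalar alpha the result is a 2 x k array whose first row holds the lower bounds and second row the upper bounds, which is why the code indexes CIs[0][0] and CIs[1][0]. A compact sketch with an invented two-column dataset (calculated vs. experimental values) and toy MUE/MSE statistics:

import numpy as np
from scikits import bootstrap as boot

# Two columns: calculated vs. experimental values (invented numbers).
data = np.column_stack([np.random.normal(0, 1, 100),
                        np.random.normal(0, 1, 100)])

def error_stats(d):
    """Return (MUE, MSE) of the difference between the two columns."""
    diff = d[:, 0] - d[:, 1]
    return np.array([np.mean(np.abs(diff)), np.mean(diff)])

cis = boot.ci(data, error_stats)   # shape (2, 2): rows = lower/upper, columns = MUE/MSE
mue_low, mue_high = cis[0][0], cis[1][0]
mse_low, mse_high = cis[0][1], cis[1][1]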
Example 17: scalesHiddenPlot

def scalesHiddenPlot(name='scales'):
    matplotlib.rcParams.update({'font.size': 20})
    lw = 3
    plt.hold(True)
    if name == 'scalesPlus':
        experimentName = 'Complex'
        nLevels = 3
        leveledFcData = turk.readScalesProblems('../../data/scale_plus_6stimuli_3levels_no_fam_24_january_SCAL.csv', name)
    elif name == 'scales':
        experimentName = 'Simple'
        nLevels = 2
        leveledFcData = turk.readScalesProblems('../../data/scales_6stimuli_3levels_no_fam_25_january_OSCA.csv', name)
    else:
        print '[forcedChoiceExperiments] Unknown experiment name: ', name   # Python 2 print statement
    sizes = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80]
    nModels = 10  # numbered 1 to 10
    agents = []  # will be an array of arrays, one entry per hidden node, one entry per training iteration
    # load the agents
    for size in sizes:
        sizeAgents = []
        for agentNum in range(1, nModels + 1):
            (listeners, speakers) = loadAllAgents('../../data/cogsci/agents-%d-%d.pickle' % (size, agentNum))
            sizeAgents.append(listeners)
        agents.append(sizeAgents)
    for (levelProblems, lineColor) in zip(leveledFcData, colors):
        dataset = forcedChoiceProblemsToDataset(levelProblems)
        hiddenLayerAccuracies = []
        hiddenLayerScores = []
        yerrs = []
        for (allListeners, size) in zip(agents, sizes):  # for each # of hidden layers
            sizeAccuracies = []  # accuracies for each independent trial for this # of hidden nodes and this level of problem; will be averaged
            sizeScores = []
            for listeners in allListeners:
                lastListener = listeners[3]
                (correct, activations, scores) = evalListenerOnClassificationDataset(lastListener, dataset)
                sizeAccuracies.append(float(correct) / len(scores))
                sizeScores.append(scores)
            averageAccuracy = np.array(sizeAccuracies).mean()
            hiddenLayerAccuracies.append(averageAccuracy)
            hiddenLayerScores.append(sizeScores)
            interval = boot.ci(np.array(sizeScores), np.average)
            err = (interval[1] - interval[0]) / 2.0
            yerrs.append(err)
        plt.errorbar(sizes, hiddenLayerAccuracies, yerr=yerrs, linewidth=lw, color=lineColor)
    plt.axis([0, sizes[-1], 0, 1])
    plt.title('ANN Accuracy on the %s Condition' % experimentName)
    plt.xlabel('Number of Hidden Nodes')
    plt.ylabel('Average Accuracy')
    plt.legend(['Level %d' % i for i in range(nLevels)], loc='lower right')
    plt.savefig('hidden%s.pdf' % name, format='pdf')
    plt.show()

Developer: acvogel | Project: discriminative-ibr | Lines of code: 51 | Source file: discrim_ibr.py
Example 18: test_bootstrap

def test_bootstrap():
    import numpy as np
    from scikits.bootstrap import ci
    data = np.random.normal(loc=1, scale=1, size=1000)
    print('std = %.2f' % data.std())
    samples = bootstrap(data, 100)
    boot_error = calc_bootstrap_error(samples, 0.32)
    boot_error_ci = ci(data, np.median, 0.32)   # alpha=0.32, i.e. roughly a +/- 1 sigma interval
    print('bootstrap error', boot_error)
    print('bootstrap error ci', boot_error_ci)

Developer: ezbc | Project: python_modules | Lines of code: 14 | Source file: mystats.py
Example 19: diffusion_tensor_ci

def diffusion_tensor_ci(positions, orientations, lagtime=1, fps=1., ndim=3, **kwargs):
    """Calculate the diffusion tensor and the confidence interval using bootstrap."""
    from scikits import bootstrap
    delta_tjn, all_xjn = _compute_displ(positions, orientations, lagtime, fps)
    if ndim == 2:
        all_xjn = all_xjn[:, [0, 1, 5]]  # only x, y transl and z rot
    statfunc = lambda x: (x[:, :, np.newaxis] * x[:, np.newaxis, :]).mean(0).ravel() * 0.5 / delta_tjn
    result = bootstrap.ci(all_xjn, statfunc, **kwargs)
    if ndim == 2:
        result = result.reshape((2, 3, 3))
    else:
        result = result.reshape((2, 6, 6))
    return result

Developer: caspervdw | Project: clustertracking | Lines of code: 16 | Source file: motion.py
Example 20: syntheticHiddenPlot

def syntheticHiddenPlot():
    """ Evaluate a variety of hidden layer agents"""
    matplotlib.rcParams.update({'font.size': 20})
    lw = 3
    plt.hold(True)
    levelInstances = [loadFacesInstances('../../data/facesInstances-%d.csv' % level) for level in [0, 1, 2]]
    sizes = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80]
    nModels = 10  # numbered 1 to 10
    agents = []  # will be an array of arrays, one entry per hidden node, one entry per training iteration
    # load the agents
    for size in sizes:
        sizeAgents = []
        for agentNum in range(1, nModels + 1):
            (listeners, speakers) = loadAllAgents('../../data/cogsci/agents-%d-%d.pickle' % (size, agentNum))
            sizeAgents.append(listeners)
        agents.append(sizeAgents)
    # loop over levels, then over model sizes, then over agents
    for (instances, lineColor) in zip(levelInstances, colors):  # for each level
        dataset = goldListenerTrainingExamplesFromInstances(instances)
        hiddenLayerAccuracies = []  # average accuracy for each hidden layer
        hiddenLayerScores = []
        yerrs = []
        for (allListeners, size) in zip(agents, sizes):  # for each # of hidden layers
            sizeAccuracies = []  # accuracies for each independent trial for this # of hidden nodes and this level of problem; will be averaged
            sizeScores = []
            for listeners in allListeners:
                lastListener = listeners[3]
                (correct, activations, scores) = evalListenerOnClassificationDataset(lastListener, dataset)
                sizeAccuracies.append(float(correct) / len(scores))
                sizeScores.append(scores)
            averageAccuracy = np.array(sizeAccuracies).mean()
            hiddenLayerAccuracies.append(averageAccuracy)
            hiddenLayerScores.append(sizeScores)
            interval = boot.ci(np.array(sizeScores), np.average)
            err = (interval[1] - interval[0]) / 2.0
            yerrs.append(err)
        plt.errorbar(sizes, hiddenLayerAccuracies, yerr=yerrs, linewidth=lw, color=lineColor)
    plt.title('ANN Accuracy by Size of Hidden Layer')
    plt.axis([0, sizes[-1], 0, 1])
    plt.xlabel('Number of Hidden Nodes')
    plt.ylabel('Listener Accuracy')
    legendTitles = ['Level 0', 'Level 1', 'Level 2']
    plt.legend(legendTitles, loc='lower right')
    plt.savefig('hiddenSynthetic.pdf', format='pdf')
    plt.show()

Developer: acvogel | Project: discriminative-ibr | Lines of code: 45 | Source file: discrim_ibr.py
Note: The scikits.bootstrap.ci examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their original authors, and copyright remains with them; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.