This article collects and summarizes typical usage examples of the numpy.percentile function in Python. If you have been wondering what numpy.percentile does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Twenty code examples of the percentile function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
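Before the project examples, here is a minimal self-contained sketch of the basic call signature (the input values are arbitrary):

import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 100.0])

# A single percentile returns a scalar; the 50th percentile is the median.
print(np.percentile(data, 50))        # 3.5

# A sequence of percentiles returns an array, computed in one pass.
print(np.percentile(data, [25, 75]))  # [2.25  4.75]

# With axis= the percentile is taken along that axis of a 2-D array.
m = np.arange(12).reshape(3, 4)
print(np.percentile(m, 50, axis=0))   # [4. 5. 6. 7.]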
Example 1: iqr

def iqr(data):
    return ",".join(
        (
            digits.format(numpy.percentile(data[column], 75) - numpy.percentile(data[column], 25))
            for column in data.columns
        )
    )

Author: russellnakamura, Project: commoncode, Lines of code: 7, Source file: commons.py
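As written, the snippet depends on a module-level numpy import and a digits format string defined elsewhere in commons.py; a self-contained version might look like the following, where the '{0:.2f}' format is an assumption:

import numpy
import pandas

digits = "{0:.2f}"  # assumed module-level format string

def iqr(data):
    """Return the interquartile range of each DataFrame column, comma-separated."""
    return ",".join(
        digits.format(numpy.percentile(data[column], 75) - numpy.percentile(data[column], 25))
        for column in data.columns
    )

print(iqr(pandas.DataFrame({"a": [1, 2, 3, 4], "b": [10, 20, 30, 40]})))  # 1.50,15.00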
Example 2: _auto_limits

def _auto_limits(self):
    if self.component_data is None:
        return
    exclude = (100 - self.percentile) / 2.
    # For subsets in 'data' mode, we want to compute the limits based on
    # the full dataset, not just the subset.
    if isinstance(self.data, Subset):
        data_values = self.data.data[self.component_id]
    else:
        data_values = self.data[self.component_id]
    try:
        lower = np.nanpercentile(data_values, exclude)
        upper = np.nanpercentile(data_values, 100 - exclude)
    except AttributeError:  # NumPy < 1.9
        data_values = data_values[~np.isnan(data_values)]
        lower = np.percentile(data_values, exclude)
        upper = np.percentile(data_values, 100 - exclude)
    if isinstance(self.data, Subset):
        lower = 0
    self.set_limits(lower, upper)

Author: saimn, Project: glue, Lines of code: 26, Source file: attribute_limits_helper.py
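The try/except above guards against NumPy versions older than 1.9, which lack np.nanpercentile; the fallback of masking the NaNs out by hand is numerically equivalent, as this small sketch illustrates:

import numpy as np

values = np.array([1.0, np.nan, 3.0, 5.0])

# np.nanpercentile (NumPy >= 1.9) ignores NaNs directly ...
print(np.nanpercentile(values, 50))                  # 3.0
# ... which matches masking the NaNs out first, as the fallback branch does.
print(np.percentile(values[~np.isnan(values)], 50))  # 3.0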
Example 3: main

def main(argv):
    map_utilizations = []
    reduce_utilizations = []
    all_utilizations = []
    dirname = argv[0]
    for filename in os.listdir(dirname):
        full_name = os.path.join(dirname, filename)
        if os.path.isfile(full_name) and filename.endswith("job_log"):
            print("Reading %s" % filename)
            analyzer = parse_logs.Analyzer(full_name)
            for (id, stage) in analyzer.stages.items():
                for task in stage.tasks:
                    for name, block_device_numbers in task.disk_utilization.items():
                        if name in ["xvdb", "xvdf"]:
                            effective_util = 0
                            if block_device_numbers[0] > 0:
                                effective_util = (block_device_numbers[1] + block_device_numbers[2]) / block_device_numbers[0]
                            all_utilizations.append(effective_util)
                            if task.has_fetch:
                                reduce_utilizations.append(effective_util)
                            else:
                                map_utilizations.append(effective_util)
    output_filename = os.path.join(dirname, "disk_utilization_cdf")
    f = open(output_filename, "w")
    for percent in range(100):
        f.write("%s\t%s\t%s\t%s\n" % (percent / 100., numpy.percentile(map_utilizations, percent),
                                      numpy.percentile(reduce_utilizations, percent),
                                      numpy.percentile(all_utilizations, percent)))
    f.close()

Author: AllenFang, Project: trace-analysis, Lines of code: 31, Source file: disk_utilization.py
Example 4: get_mean_vmax

def get_mean_vmax():
    hostvmaxs = []
    hostvmax25s = []
    hostvmax75s = []
    twentyfifth, fifty, seventyfifth = get_percentile()
    rootdir = "/Users/catherinefielder/Documents/Research_Halos/HaloDetail"
    for subdir, dirs, files in os.walk(rootdir):
        head, tail = os.path.split(subdir)
        haloname = tail
        for file in files:
            if file.endswith("_columnsadded_final"):
                values = ascii.read(
                    os.path.join(subdir, file), format="commented_header"
                )  # Get full path and access file
                hostvmax = values[1]["host_vmax"]
                hostvmaxs = np.append(hostvmaxs, hostvmax)
    twentyfifth = np.percentile(hostvmaxs, 25)
    seventyfifth = np.percentile(hostvmaxs, 75)
    for i in range(0, len(hostvmaxs)):
        if hostvmaxs[i] >= seventyfifth:
            hostvmax75s = np.append(hostvmax75s, hostvmaxs[i])
        elif hostvmaxs[i] < twentyfifth:
            hostvmax25s = np.append(hostvmax25s, hostvmaxs[i])
        else:
            continue
    sumvmax = np.sum(hostvmaxs)
    meanvmax = np.divide(sumvmax, len(hostvmaxs))
    mean75 = np.mean(hostvmax75s)
    mean25 = np.mean(hostvmax25s)
    print("mean")
    print(meanvmax)
    print(mean75)
    print(mean25)
    return meanvmax, mean75, mean25

Author: cfielder, Project: DM_haloprops, Lines of code: 34, Source file: total_mean_CVF_spin.py
Example 5: plot_wavenvelope

def plot_wavenvelope(self, ax, w_start, w_end):
    """ This function plots the envelope of the recording.
    :param ax: The axis in which you wish to plot.
    :param w_start: Start of the best window.
    :param w_end: End of the best window.
    """
    window_size = int(0.05 * self._sample_rate)  # 0.050 are 50 milliseconds for the envelope window!
    w = 1.0 * np.ones(window_size) / window_size
    envelope = (np.sqrt((np.correlate(self._eod ** 2, w, mode='same') -
                         np.correlate(self._eod, w, mode='same') ** 2)).ravel()) * np.sqrt(2.)
    upper_bound = np.max(envelope) + np.percentile(envelope, 1)
    ax.fill_between(self._time[::500], y1=-envelope[::500], y2=envelope[::500], color='purple', alpha=0.5)
    ax.plot((w_start, w_start), (-upper_bound, upper_bound), 'k--', linewidth=2)
    ax.plot((w_end, w_end), (-upper_bound, upper_bound), 'k--', linewidth=2)
    ax.text((w_start + w_end) / 2., upper_bound - np.percentile(envelope, 10), 'Analysis Window',
            rotation='horizontal', horizontalalignment='center', verticalalignment='center', fontsize=14)
    ax.set_ylim(-upper_bound, upper_bound)
    ax.set_xlabel('Time [s]', fontsize=16)
    ax.set_ylabel('Signal Amplitude [au]', fontsize=16)
    ax.tick_params(axis='both', which='major', labelsize=14)
    pass

Author: fabiansinz, Project: thunderfish, Lines of code: 25, Source file: FishRecording.py
Example 6: getMed

def getMed(x):
    if len(x) == 0:
        x = np.array([0])
    median = np.percentile(x, 50)
    sigma_min = median - np.percentile(x, 16)
    sigma_max = np.percentile(x, 84) - median
    return median, sigma_min, sigma_max

Author: jrbourbeau, Project: ShowerLLH_scripts, Lines of code: 7, Source file: sim.py
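A note on why getMed reports the 16th and 84th percentiles: for roughly Gaussian data they sit about one standard deviation below and above the median, so sigma_min and sigma_max act as asymmetric 1-sigma error bars. A quick check on synthetic data (the sample size and seed here are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=10.0, scale=2.0, size=100_000)

median = np.percentile(x, 50)
sigma_min = median - np.percentile(x, 16)
sigma_max = np.percentile(x, 84) - median
# Both one-sided widths come out close to the true sigma of 2.0.
print(median, sigma_min, sigma_max)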
Example 7: updateStats

def updateStats(self):
    if self.current_layer is not None:
        current_attribute = self.dlg.getCurrentAttribute()
        if current_attribute >= 0:
            attribute = self.layer_attributes[current_attribute]
            # check if stats have been calculated before
            idx = self.checkValuesAvailable(attribute)
            if idx == -1:
                self.retrieveAttributeValues(attribute)
                idx = len(self.attribute_statistics) - 1
            stats = self.attribute_statistics[idx]
            # calculate stats of selected objects only
            select_stats = dict()
            if self.current_layer.selectedFeatureCount() > 0:
                self.selection_values, self.selection_ids = uf.getFieldValues(self.current_layer, attribute['name'], null=False, selection=True)
                sel_values = [val for val in self.selection_values if val != NULL]
                select_stats['Number'] = len(sel_values)
                select_stats['Mean'] = uf.truncateNumber(np.mean(sel_values))
                select_stats['Std Dev'] = uf.truncateNumber(np.std(sel_values))
                select_stats['Variance'] = uf.truncateNumber(np.var(sel_values))
                select_stats['Median'] = uf.truncateNumber(np.median(sel_values))
                select_stats['Minimum'] = np.min(sel_values)
                select_stats['Maximum'] = np.max(sel_values)
                select_stats['Range'] = uf.truncateNumber(select_stats['Maximum'] - select_stats['Minimum'])
                select_stats['1st Quart'] = uf.truncateNumber(np.percentile(sel_values, 25))
                select_stats['3rd Quart'] = uf.truncateNumber(np.percentile(sel_values, 75))
                select_stats['IQR'] = uf.truncateNumber(select_stats['3rd Quart'] - select_stats['1st Quart'])
                select_stats['Gini'] = uf.roundNumber(uf.calcGini(sel_values))
            else:
                self.selection_values = []
                self.selection_ids = []
            # update the dialog
            self.dlg.setStats(stats, select_stats)

Author: JRappaz, Project: qgisSpaceSyntaxToolkit, Lines of code: 33, Source file: ExplorerTool.py
Example 8: show_bootstrap_statistics

def show_bootstrap_statistics(clf, X, y, features):
    num_features = len(features)
    coefs = []
    for i in range(num_features):
        coefs.append([])
    for _ in range(BOOTSTRAP_ITERATIONS):
        X_sample, y_sample = resample(X, y)
        clf.fit(X_sample, y_sample)
        for i, c in enumerate(get_normalized_coefs(clf)):
            coefs[i].append(c)
    poi_index = features.index('POI')
    building_index = features.index('Building')
    coefs[building_index] = coefs[poi_index]
    intervals = []
    print()
    print('***** Bootstrap statistics *****')
    print('{:<20}{:<20}{:<10}{:<10}'.format('Feature', '95% interval', 't-value', 'Pr(>|t|)'))
    print()
    for i, cs in enumerate(coefs):
        values = np.array(cs)
        lo = np.percentile(values, 2.5)
        hi = np.percentile(values, 97.5)
        interval = '({:.3f}, {:.3f})'.format(lo, hi)
        tv = np.mean(values) / np.std(values)
        pr = (1.0 - t.cdf(x=abs(tv), df=len(values))) * 0.5
        stv = '{:.3f}'.format(tv)
        spr = '{:.3f}'.format(pr)
        print('{:<20}{:<20}{:<10}{:<10}'.format(features[i], interval, stv, spr))

Author: milchakov, Project: omim, Lines of code: 34, Source file: scoring_model.py
Example 9: write_parameters_outputvalues

def write_parameters_outputvalues(self, P):
    Mstar, SFR_opt, _ = model.stellar_info_array(self.chain.flatchain_sorted, self.data, self.out['realizations2int'])
    column_names = np.transpose(np.array(["P025", "P16", "P50", "P84", "P975"], dtype='|S3'))
    chain_pars = np.column_stack((self.chain.flatchain_sorted, Mstar, SFR_opt))
    # np.mean(chain_pars, axis[0]),
    # np.std(chain_pars, axis[0]),
    if self.out['calc_intlum']:
        SFR_IR = model.sfr_IR(self.int_lums[0])  # check that ['intlum_names'][0] is always L_IR(8-100)
        chain_others = np.column_stack((self.int_lums.T, SFR_IR))
        outputvalues = np.column_stack((np.transpose(list(map(lambda v: (v[0], v[1], v[2], v[3], v[4]), zip(*np.percentile(chain_pars, [2.5, 16, 50, 84, 97.5], axis=0))))),
                                        np.transpose(list(map(lambda v: (v[0], v[1], v[2], v[3], v[4]), zip(*np.percentile(chain_others, [2.5, 16, 50, 84, 97.5], axis=0)))))))
        outputvalues_header = ' '.join([i for i in np.hstack((P.names, 'Mstar', 'SFR_opt', self.out['intlum_names'], 'SFR_IR',))])
    else:
        outputvalues = np.column_stack((list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]), zip(*np.percentile(chain_pars, [16, 50, 84], axis=0))))))
        outputvalues_header = ' '.join([i for i in P.names])
    return outputvalues, outputvalues_header

Author: GabrielaCR, Project: functions, Lines of code: 25, Source file: PLOTandWRITE_AGNfitter2.py
Example 10: test_quantile

def test_quantile(self):
    from numpy import percentile

    q = self.ts.quantile(0.1)
    self.assertEqual(q, percentile(self.ts.valid(), 10))
    q = self.ts.quantile(0.9)
    self.assertEqual(q, percentile(self.ts.valid(), 90))
    # object dtype
    q = Series(self.ts, dtype=object).quantile(0.9)
    self.assertEqual(q, percentile(self.ts.valid(), 90))
    # datetime64[ns] dtype
    dts = self.ts.index.to_series()
    q = dts.quantile(.2)
    self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
    # timedelta64[ns] dtype
    tds = dts.diff()
    q = tds.quantile(.25)
    self.assertEqual(q, pd.to_timedelta('24:00:00'))
    # GH7661
    result = Series([np.timedelta64('NaT')]).sum()
    self.assertTrue(result is pd.NaT)
    msg = 'percentiles should all be in the interval \\[0, 1\\]'
    for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
        with tm.assertRaisesRegexp(ValueError, msg):
            self.ts.quantile(invalid)

Author: RogerThomas, Project: pandas, Lines of code: 31, Source file: test_quantile.py
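One detail the test makes easy to miss: Series.quantile expects fractions in [0, 1] while numpy.percentile expects percentages in [0, 100], so 0.9 on the pandas side pairs with 90 on the NumPy side. A minimal illustration:

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])

# Same quantile, two scales: 0.9 for pandas, 90 for NumPy.
assert np.isclose(s.quantile(0.9), np.percentile(s.values, 90))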
Example 11: ampDiffStats

def ampDiffStats(ampIm1, ampIm2, osIm1, osIm2, exptime=0.0):
    stats = np.zeros(shape=(1,),
                     dtype=statDtype)
    a_i = 0
    _s1 = np.median(ampIm1) - np.median(osIm1)
    _s2 = np.median(ampIm2) - np.median(osIm2)
    stats[a_i]['signal'] = signal = (_s1 + _s2)/2
    stats[a_i]['npix'] = ampIm1.size
    stats[a_i]['sqrtSig'] = np.sqrt(signal)
    stats[a_i]['bias'] = (np.median(osIm1) + np.median(osIm2))/2
    ampIm = ampIm2.astype('f4') - ampIm1
    osIm = osIm2.astype('f4') - osIm1
    sig1 = (0.741/np.sqrt(2)) * np.subtract.reduce(np.percentile(ampIm, [75, 25]))
    sig2 = (0.741/np.sqrt(2)) * np.subtract.reduce(np.percentile(osIm, [75, 25]))
    _, trusig1, _ = geom.clippedStats(ampIm) / np.sqrt(2)
    _, trusig2, _ = geom.clippedStats(osIm) / np.sqrt(2)
    stats[a_i]['readnoise'] = sig2
    stats[a_i]['readnoiseM'] = trusig2
    stats[a_i]['shotnoise'] = sig = np.sqrt(np.abs(sig1**2 - sig2**2))
    stats[a_i]['shotnoiseM'] = trusig = np.sqrt(np.abs(trusig1**2 - trusig2**2))
    stats[a_i]['gain'] = gain = signal/sig**2
    stats[a_i]['gainM'] = signal/trusig**2
    stats[a_i]['noise'] = sig2*gain
    stats[a_i]['flux'] = signal/exptime if exptime != 0 else 0.0
    return stats, ampIm, osIm

Author: Subaru-PFS, Project: ics_xcu_fpga, Lines of code: 32, Source file: ccdProcedures.py
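The 0.741 factor above is the usual robust-sigma conversion: the interquartile range of a Gaussian is about 1.349 standard deviations, and 1/1.349 ≈ 0.741, so 0.741 × IQR estimates sigma while staying insensitive to outliers (the extra 1/sqrt(2) compensates for differencing two frames). A quick numerical check of the constant:

import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(loc=0.0, scale=3.0, size=200_000)

# 0.741 * IQR recovers sigma for Gaussian data, but is robust to outliers.
q75, q25 = np.percentile(x, [75, 25])
print(0.741 * (q75 - q25))  # ~3.0
print(np.std(x))            # ~3.0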
Example 12: descriptive_stats

def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):
    """ Simple statistics from vector.
    """
    if mean:
        mean_ = np.mean(array)
    median = np.median(array)
    mini = np.min(array)
    maxi = np.max(array)
    first_qu = np.percentile(array, 25)
    third_qu = np.percentile(array, 75)
    if verbose:
        if mean:
            label += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / '
            label += '3rd QU={:.1f} / max={:.1f}'
            print(label.format(mini, first_qu, mean_, median, third_qu, maxi))
        else:
            label += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} '
            label += '/ max={:.1f}'
            print(label.format(mini, first_qu, median, third_qu, maxi))
    if plot:
        boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.')
    if mean:
        return mini, first_qu, mean_, median, third_qu, maxi
    else:
        return mini, first_qu, median, third_qu, maxi

Author: VChristiaens, Project: VIP, Lines of code: 28, Source file: utils_stats.py
Example 13: get_stat_function

def get_stat_function(statistics, perc=None):
    # Define personalized functions for binned_statistics
    if (statistics == 'mean') | (statistics == 'median'):
        stat_func = statistics
    elif statistics == 'std':
        stat_func = np.std
    elif statistics == 'mse':
        stat_func = lambda x: np.mean(x**2)
    elif statistics == 'frac':
        # stat_func = lambda x: 100.0*np.abs(np.mean(x))/(np.abs(np.mean(x)) + np.std(x)) Wrong decomposition
        stat_func = lambda x: np.sign(np.mean(x))*100.0*np.mean(x)**2/(np.mean(x)**2 + np.std(x)**2)
    elif statistics == 'cv':
        stat_func_ratio = lambda x: np.std(x)/np.mean(x)
        stat_func_diff = lambda x: np.std(x) - np.abs(np.mean(x))  # To compute the CV for an already multiplicative variable (GD)
    elif statistics == 'iqr':
        stat_func = lambda x: np.percentile(x, 75) - np.percentile(x, 25)
    elif statistics == 'percentile':
        if perc is None:
            print('Do not forget to pass the wanted percentile. I will use 50 by default...')
            perc = 50
        stat_func = lambda x: np.percentile(x, perc)
    else:
        print('Wrong statistics asked:', statistics)
        sys.exit(1)
    return stat_func

Author: meteoswiss-mdr, Project: precipattractor, Lines of code: 25, Source file: maple_dataload.py
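The comment in the source points at scipy.stats.binned_statistic, which accepts any callable as its statistic argument; that is where a function like the 'iqr' lambda above is meant to be plugged in. A hedged usage sketch (the data and bin count are made up for illustration):

import numpy as np
from scipy import stats

x = np.linspace(0.0, 10.0, 1000)
y = np.sin(x) + np.random.default_rng(2).normal(scale=0.1, size=x.size)

# An IQR statistic per bin, like the one built by get_stat_function('iqr').
iqr_func = lambda v: np.percentile(v, 75) - np.percentile(v, 25)
result = stats.binned_statistic(x, y, statistic=iqr_func, bins=20)
print(result.statistic)  # one IQR value per bin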
Example 14: arrivals

def arrivals(self, stories, state=6):
    ''' Chart a plot point for every arrival time in state
    '''
    arrivals = self.release.kanban().state_arrival_interval(state)
    dates = [a['date'] for a in arrivals]
    arrivals = [round(a['interval']/60./60., 1) for a in arrivals]
    average = numpy.median([arrivals])
    std = numpy.std([arrivals])
    iql = numpy.percentile([arrivals], 25)
    iqh = numpy.percentile([arrivals], 75)
    nsul = []
    nsuw = []
    nsll = []
    nslw = []
    avg = []
    for x in arrivals:
        nsul.append(average + (iqh * 3))
        nsuw.append(average + (iqh * 2))
        nslw.append(average - (iql * 2))
        nsll.append(average - (iql * 3))
        avg.append(average)
    pyplot.plot(dates, arrivals, '*', color='g')
    pyplot.plot(dates, nsul, 'o', linestyle='-', color='r')
    pyplot.plot(dates, nsuw, '.', linestyle=':', color='y')
    pyplot.plot(dates, nslw, '.', linestyle=':', color='y')
    pyplot.plot(dates, nsll, 'o', linestyle='-', color='r')
    pyplot.plot(dates, avg, '', linestyle='-.', markerfacecolor='None')
    pyplot.show(block=False)

Author: deximer, Project: jira-shell, Lines of code: 28, Source file: Plugin.py
Example 15: handle_data

def handle_data(self):
    current_time = self.current_datetime
    try:
        location = self.date_index.get_loc(current_time)
    except KeyError:
        return
    if location >= 99:
        histories = self.signals.factor[location-99:location]
        current_level = histories[-1]
        upper = np.percentile(histories, 95)
        lower = np.percentile(histories, 5)
        mid_upper = np.percentile(histories, 75)
        mid_lower = np.percentile(histories, 25)
        if current_level > upper:
            self.order_to('ru.xsge', 1, 1)
        elif current_level < lower:
            self.order_to('ru.xsge', -1, 1)
        #elif mid_lower < current_level < mid_upper:
        #    self.order_to('ru.cffex', 1, 0)
        self.keep('factor', current_level)
        self.keep('factor (95%)', upper)
        self.keep('factor (5%)', lower)
        self.keep('factor (75%)', mid_upper)
        self.keep('factor (25%)', mid_lower)
        self.keep('ru.xsge', self.close['ru.xsge'])
    else:
        return

Author: wegamekinglc, Project: AlgoTrading, Lines of code: 31, Source file: RUHighFrequencyExample.py
Example 16: test_random_posterior

def test_random_posterior(self):
    ndraws = 100000
    ssqr_draws = np.empty(ndraws)
    for i in range(ndraws):
        ssqr_draws[i] = self.sigsqr.random_posterior()
    nu = self.sigsqr.nu
    prior_ssqr = self.sigsqr.lamb
    post_dof = nu + len(self.y)
    post_ssqr = (nu * prior_ssqr + self.y.size * np.var(self.sigsqr.bart_step.resids)) / post_dof
    igam_shape = post_dof / 2.0
    igam_scale = post_dof * post_ssqr / 2.0
    igamma = stats.distributions.invgamma(igam_shape, scale=igam_scale)
    # test draws from conditional posterior by comparing 1st and 2nd moments to true values
    true_mean = igamma.moment(1)
    frac_diff = np.abs(true_mean - ssqr_draws.mean()) / true_mean
    rpmsg = "Fractional difference in mean from BartVariance.random_posterior() is greater than 2%"
    self.assertLess(frac_diff, 0.02, msg=rpmsg)
    true_ssqr = igamma.moment(2)
    frac_diff = np.abs(true_ssqr - (ssqr_draws.var() + ssqr_draws.mean() ** 2)) / true_ssqr
    rpmsg = "Fractional difference in 2nd moment from BartVariance.random_posterior() is greater than 2%"
    self.assertLess(frac_diff, 0.02, msg=rpmsg)
    # make sure gibbs sampler constrains the correct value
    ssqr_low = np.percentile(ssqr_draws, 1.0)
    ssqr_high = np.percentile(ssqr_draws, 99.0)
    rpmsg = "Value of Variance parameter returned by Gibbs sampler is outside of 99% credibility interval."
    self.assertGreater(self.true_sigsqr, ssqr_low, msg=rpmsg)
    self.assertLess(self.true_sigsqr, ssqr_high, msg=rpmsg)

Author: acbecker, Project: BART, Lines of code: 33, Source file: test_tree_parameters.py
Example 17: stat_info

def stat_info(data):
    import matplotlib.pyplot as plt
    D = np.ravel(data)
    U = np.unique(D)
    if len(U) > 1:
        sep = np.min(U[1:] - U[:-1])
        N = min(100, int(np.ceil((np.max(D) - np.min(D)) / sep)))
    else:
        N = 1
    mean = np.mean(D)
    std = np.std(D)
    fig, ax = plt.subplots(2, 1, figsize=(21, 4))
    ax[0].boxplot(D, 0, 'ro', 0)
    ax[1].hist(D, N, density=True)
    ax[1].axvline(mean, color='r', label='mean')
    ax[1].axvline(mean + std, color='r', linestyle='--', label='1$\\sigma$')
    ax[1].axvline(mean - std, color='r', linestyle='--', label='1$\\sigma$')
    if mean - 2*std >= U[0]:
        ax[1].axvline(mean - 2*std, color='r', linestyle=':', label='2$\\sigma$')
    if mean + 2*std <= U[-1]:
        ax[1].axvline(mean + 2*std, color='r', linestyle=':', label='2$\\sigma$')
    ax[1].legend()
    print("Stats")
    print("\tAverage:", mean)
    print("\tStandard-deviation:", std)
    print("\tMinimum:", np.min(D))
    print("\tQ1:", np.percentile(D, 25))
    print("\tMedian:", np.percentile(D, 50))
    print("\tQ3:", np.percentile(D, 75))
    print("\tMaximum:", np.max(D))

Author: scholi, Project: pySPM, Lines of code: 32, Source file: math.py
Example 18: plotKineticsScatter

def plotKineticsScatter(kinArr, outputFileName):
    handles = []
    colors = ['red', 'green', 'blue', 'magenta']
    bases = ['A', 'C', 'G', 'T']
    fig, ax = _createFigTemplate(dims=(10, 8))
    for i in range(4):
        baseHits = kinArr[kinArr['base'] == bases[i]]
        if baseHits.shape[0] > 0:
            # Add a bit of scatter to avoid ugly aliasing in plot due to
            # integer quantization
            cov = baseHits['coverage'] + 0.25 * \
                np.random.randn(baseHits.shape[0])
            score = baseHits['score'] + 0.25 * \
                np.random.randn(baseHits.shape[0])
            pl = ax.scatter(cov, score, c=colors[i], label=bases[i], lw=0, alpha=0.3, s=12)
            handles.append(pl)
    ax.set_xlabel('Per-Strand Coverage')
    ax.set_ylabel('Modification QV')
    plt.legend(handles, bases, loc='upper left')
    if kinArr.shape[0] > 0:
        ax.set_xlim(0, np.percentile(kinArr['coverage'], 95.0) * 1.4)
        ax.set_ylim(0, np.percentile(kinArr['score'], 99.9) * 1.3)
    fig.savefig(outputFileName, dpi=72)
    plt.close(fig)

Author: vrainish-pacbio, Project: pbreports, Lines of code: 33, Source file: motifs.py
Example 19: BootstrapSc

def BootstrapSc(Method, Data, n=10000):
    """
    Bootstrap the calculation of the best fit Sc value n times to get the 95%
    confidence interval for the best fit Sc.
    Values of n larger than 10000 will take a long time to run.
    """
    tmp = []
    # need to convert the LH,R,CHT data into a serial 1D array before bootstrapping
    if Method == "raw":
        for i in range(len(Data[0])):
            tmp.append(SerializeData(Data[2][i], Data[4][i], Data[3][i]))
    if Method == "patches":
        for i in range(len(Data[0])):
            tmp.append(SerializeData(Data[2][i], Data[10][i], Data[6][i]))
    if Method == "basins":
        for i in range(len(Data[0])):
            tmp.append(SerializeData(Data[5][i], Data[7][i], Data[6][i]))
    ToSample = np.array(tmp)
    Scs = []
    i = 0
    while i < n:
        print(i)
        sample = np.random.choice(ToSample, len(ToSample), replace=True)
        LH, R, CHT = UnserializeList(sample)
        sc, _, _, _, _ = optimize.leastsq(Residuals, 0.8, args=(R, LH, CHT), full_output=True)
        if sc < 2.0:
            Scs.append(sc[0])
            i += 1
    # mean, upper bound, lower bound
    return np.mean(Scs), np.percentile(Scs, 97.5) - np.mean(Scs), np.mean(Scs) - np.percentile(Scs, 2.5)

Author: sgrieve, Project: ER_Star_Figs, Lines of code: 35, Source file: Plot_ER_Data_Figure_8a.py
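The percentile-based confidence interval at the end is the standard bootstrap recipe: resample with replacement, recompute the statistic each time, and read the 2.5th and 97.5th percentiles off the resulting distribution. Stripped of the project-specific least-squares fit, the idea looks like this (the exponential sample is just an illustrative stand-in):

import numpy as np

rng = np.random.default_rng(3)
sample = rng.exponential(scale=2.0, size=500)

boot_means = np.array([
    rng.choice(sample, size=sample.size, replace=True).mean()
    for _ in range(10_000)
])
# The central 95% of the bootstrap distribution gives the confidence interval.
lo, hi = np.percentile(boot_means, [2.5, 97.5])
print("mean=%.3f  95%% CI=(%.3f, %.3f)" % (sample.mean(), lo, hi))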
Example 20: evaluate

def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder):
    new_im = None
    start_time = timeit.default_timer()
    if algo == "grayworld":
        new_im = cv2.xphoto.autowbGrayworld(im, 0.95)
    elif algo == "nothing":
        new_im = im
    elif algo == "learning_based":
        new_im = cv2.xphoto.autowbLearningBased(im, None, range_thresh, 0.98, bin_num)
    elif algo == "GT":
        gains = gt_illuminant / min(gt_illuminant)
        g1 = float(1.0 / gains[2])
        g2 = float(1.0 / gains[1])
        g3 = float(1.0 / gains[0])
        new_im = cv2.xphoto.applyChannelGains(im, g1, g2, g3)
    time = 1000 * (timeit.default_timer() - start_time)  # time in ms
    if len(dst_folder) > 0:
        if not os.path.exists(dst_folder):
            os.makedirs(dst_folder)
        im_name = ("%04d_" % i) + algo + ".jpg"
        cv2.imwrite(os.path.join(dst_folder, im_name), stretch_to_8bit(new_im))
    # recover the illuminant from the color balancing result, assuming the standard model:
    estimated_illuminant = [0, 0, 0]
    eps = 0.01
    estimated_illuminant[2] = np.percentile((im[:, :, 0] + eps) / (new_im[:, :, 0] + eps), 50)
    estimated_illuminant[1] = np.percentile((im[:, :, 1] + eps) / (new_im[:, :, 1] + eps), 50)
    estimated_illuminant[0] = np.percentile((im[:, :, 2] + eps) / (new_im[:, :, 2] + eps), 50)
    res = np.arccos(np.dot(gt_illuminant, estimated_illuminant) /
                    (np.linalg.norm(gt_illuminant) * np.linalg.norm(estimated_illuminant)))
    return (time, (res / np.pi) * 180)

Author: AryaPhilip, Project: opencv_contrib, Lines of code: 33, Source file: color_balance_benchmark.py
Note: The numpy.percentile examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not repost without permission.