This article collects typical usage examples of Python's numpy.mean function. If you have been wondering how exactly mean is used and what it is good for, the curated examples here may help. Below are 20 code examples of the mean function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
Example 1: figure_2_4

def figure_2_4():
    """Replicate figure 2.4 of Sutton and Barto's book."""
    print('Running figure 2.4 simulation ...')
    np.random.seed(1234)
    epsilons = (0.1, 0)
    q_inits = (0, 5)
    ars, pos = [], []
    for epsilon, q_init in zip(epsilons, q_inits):
        ar, po = run_experiment(2000, 1000, epsilon=epsilon, Q_init=q_init,
                                alpha=0.1)
        ars.append(np.mean(ar, 0))
        pos.append(np.mean(po, 0))
    # plot the results
    plt.close('all')
    f, (ax1, ax2) = plt.subplots(2)
    labels = (r'$\epsilon$-greedy', 'optimistic')
    for i, label in enumerate(labels):
        ax1.plot(ars[i].T, label=label)
        ax2.plot(pos[i].T, label=label)
    ax1.legend(loc='lower right')
    ax1.set_ylabel('Average reward')
    ax1.set_xlim(xmin=-10)
    ax2.legend(loc='lower right')
    ax2.set_xlabel('Plays')
    ax2.set_ylabel('% Optimal action')
    ax2.set_xlim(xmin=-20)
    plt.savefig('fig_2_4.pdf')
    plt.show()

Author: kokorotakey, Project: bandit, Lines: 29, Source: bandit.py
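Here np.mean(ar, 0) averages over the first axis, collapsing the per-run reward matrix into one learning curve. A minimal sketch of that pattern, using a made-up reward matrix in place of run_experiment's output:

import numpy as np

# Hypothetical rewards: 2000 independent runs x 1000 plays.
rewards = np.random.normal(loc=1.0, scale=0.5, size=(2000, 1000))
learning_curve = np.mean(rewards, axis=0)  # average over runs
print(learning_curve.shape)                # (1000,)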
Example 2: getIdealWins

def getIdealWins(errors, testErrors, p=0.01):
    """
    Figure out whether the ideal error obtained using the test set is an improvement
    over model selection using CV.
    """
    winsShape = list(errors.shape[1:-1])
    winsShape.append(3)
    stdWins = numpy.zeros(winsShape, int)
    for i in range(len(sampleSizes)):
        for j in range(foldsSet.shape[0]):
            s1 = errors[:, i, j, 0]
            s2 = testErrors[:, i]
            s1Mean = numpy.mean(s1)
            s2Mean = numpy.mean(s2)
            t, prob = scipy.stats.wilcoxon(s1, s2)
            if prob < p:
                if s1Mean > s2Mean:
                    stdWins[i, j, 2] = 1
                elif s1Mean < s2Mean:
                    stdWins[i, j, 0] = 1
            else:
                print("Test draw samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
                stdWins[i, j, 1] = 1
    return stdWins

Author: pierrebo, Project: wallhack, Lines: 28, Source: ProcessResults.py
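The pattern here pairs a Wilcoxon signed-rank test with a comparison of numpy.mean values to decide which method won. A self-contained sketch of the same idea, with synthetic paired error samples:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
cv_errors = rng.normal(0.20, 0.02, size=50)                 # hypothetical CV errors
test_errors = cv_errors - rng.normal(0.01, 0.005, size=50)  # slightly better
stat, prob = stats.wilcoxon(cv_errors, test_errors)
if prob < 0.01:
    better = 'test' if np.mean(test_errors) < np.mean(cv_errors) else 'CV'
    print('significant difference; lower mean error:', better)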
Example 3: add_noise_evoked

def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
    """Adds noise to evoked object with specified SNR.

    SNR is computed in the interval from tmin to tmax.

    Parameters
    ----------
    evoked : Evoked object
        An instance of evoked with signal
    noise : Evoked object
        An instance of evoked with noise
    snr : float
        signal to noise ratio in dB. It corresponds to
        10 * log10( var(signal) / var(noise) )
    tmin : float
        start time before event
    tmax : float
        end time after event

    Returns
    -------
    evoked_noise : Evoked object
        An instance of evoked corrupted by noise
    """
    evoked = copy.deepcopy(evoked)
    tmask = _time_mask(evoked.times, tmin, tmax)
    tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
                        np.mean((noise.data ** 2).ravel()))
    noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
    evoked.data += noise.data
    return evoked

Author: rajul, Project: mne-python, Lines: 31, Source: evoked.py
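The SNR here is a ratio of mean signal power to mean noise power, expressed in dB. A standalone sketch with synthetic arrays (no MNE Evoked objects involved):

import numpy as np

signal = np.sin(np.linspace(0, 20 * np.pi, 1000))
noise = np.random.normal(0, 0.1, size=1000)
snr_db = 10 * np.log10(np.mean(signal ** 2) / np.mean(noise ** 2))
print('SNR: %.1f dB' % snr_db)   # ~17 dB for these amplitudes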
Example 4: work

def work(self, **kwargs):
    self.__dict__.update(kwargs)
    self.worked = True
    samples = LGMM1(rng=self.rng,
                    size=(self.n_samples,),
                    **self.LGMM1_kwargs)
    samples = np.sort(samples)
    edges = samples[::self.samples_per_bin]
    centers = .5 * edges[:-1] + .5 * edges[1:]
    print(edges)
    pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
    dx = edges[1:] - edges[:-1]
    y = 1 / dx / len(dx)
    if self.show:
        plt.scatter(centers, y)
        plt.plot(centers, pdf)
        plt.show()
    err = (pdf - y) ** 2
    print(np.max(err))
    print(np.mean(err))
    print(np.median(err))
    if not self.show:
        assert np.max(err) < .1
        assert np.mean(err) < .01
        assert np.median(err) < .01

Author: AshBT, Project: hyperopt, Lines: 27, Source: test_tpe.py
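The test reduces an array of squared deviations to scalar statistics with np.max, np.mean, and np.median. A hedged sketch of the same idea against a known density (the standard normal pdf substitutes for LGMM1's):

import numpy as np

samples = np.random.normal(size=100000)
y, edges = np.histogram(samples, bins=50, density=True)
centers = 0.5 * (edges[:-1] + edges[1:])
pdf = np.exp(-centers ** 2 / 2) / np.sqrt(2 * np.pi)
err = (pdf - y) ** 2
print(np.max(err), np.mean(err), np.median(err))  # all should be tiny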
Example 5: Haffine_from_points

def Haffine_from_points(fp, tp):
    '''Compute the homography H of an affine transform such that tp is fp transformed by H.'''
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')
    # Condition (normalize) the points
    # map the start points
    m = numpy.mean(fp[:2], axis=1)
    maxstd = numpy.max(numpy.std(fp[:2], axis=1)) + 1e-9
    C1 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C1[0, 2] = -m[0] / maxstd
    C1[1, 2] = -m[1] / maxstd
    fp_cond = numpy.dot(C1, fp)
    # map the corresponding points
    m = numpy.mean(tp[:2], axis=1)
    maxstd = numpy.max(numpy.std(tp[:2], axis=1)) + 1e-9
    C2 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C2[0, 2] = -m[0] / maxstd
    C2[1, 2] = -m[1] / maxstd
    tp_cond = numpy.dot(C2, tp)
    # after conditioning, the point means are zero, so the translation is zero
    A = numpy.concatenate((fp_cond[:2], tp_cond[:2]), axis=0)
    U, S, V = numpy.linalg.svd(A.T)
    # create matrices B and C
    tmp = V[:2].T
    B = tmp[:2]
    C = tmp[2:4]
    tmp2 = numpy.concatenate((numpy.dot(C, numpy.linalg.pinv(B)), numpy.zeros((2, 1))), axis=1)
    H = numpy.vstack((tmp2, [0, 0, 1]))
    H = numpy.dot(numpy.linalg.inv(C2), numpy.dot(H, C1))  # decondition
    return H / H[2, 2]  # normalize and return

Author: MarkPrecursor, Project: Programming-Computer-Vision-with-python, Lines: 35, Source: homography.py
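Conditioning centers the point cloud with numpy.mean and rescales by the largest standard deviation, which keeps the SVD numerically stable. The same step in isolation, on made-up homogeneous points:

import numpy

points = numpy.array([[0., 2., 4.], [1., 3., 5.], [1., 1., 1.]])  # 3 x N
m = numpy.mean(points[:2], axis=1)
maxstd = numpy.max(numpy.std(points[:2], axis=1)) + 1e-9
C = numpy.diag([1 / maxstd, 1 / maxstd, 1.])
C[0, 2] = -m[0] / maxstd
C[1, 2] = -m[1] / maxstd
conditioned = numpy.dot(C, points)
print(numpy.mean(conditioned[:2], axis=1))  # ~ [0, 0]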
Example 6: trainer

def trainer(model, data, epochs, validate_period, model_path, prob_lm=0.1, runid=''):
    def valid_loss():
        result = dict(lm=[], visual=[])
        for item in data.iter_valid_batches():
            result['lm'].append(model.lm.loss_test(*model.lm.args(item)))
            result['visual'].append(model.visual.loss_test(*model.visual.args(item)))
        return result
    costs = Counter(dict(cost_v=0.0, N_v=0.0, cost_t=0.0, N_t=0.0))
    print("LM: {} parameters".format(count_params(model.lm.params())))
    print("Vi: {} parameters".format(count_params(model.visual.params())))
    for epoch in range(1, epochs + 1):
        for _j, item in enumerate(data.iter_train_batches()):
            j = _j + 1
            if random.random() <= prob_lm:
                cost_t = model.lm.train(*model.lm.args(item))
                costs += Counter(dict(cost_t=cost_t, N_t=1))
            else:
                cost_v = model.visual.train(*model.visual.args(item))
                costs += Counter(dict(cost_v=cost_v, N_v=1))
            print(epoch, j, j * data.batch_size, "train",
                  numpy.divide(costs['cost_v'], costs['N_v']),
                  numpy.divide(costs['cost_t'], costs['N_t']))
            if j % validate_period == 0:
                result = valid_loss()
                print(epoch, j, 0, "valid",
                      numpy.mean(result['visual']),
                      numpy.mean(result['lm']))
            sys.stdout.flush()
        model.save(path='model.r{}.e{}.zip'.format(runid, epoch))
    model.save(path='model.zip')

Author: gchrupala, Project: reimaginet, Lines: 30, Source: lm_visual.py
Example 7: sample_every_two_correlation_times

def sample_every_two_correlation_times(energy_data, magnetization_data, correlation_time, no_of_sites):
    """Sample the given data every 2 correlation times and determine value and error."""
    magnet_samples = []
    energy_samples = []
    for t in np.arange(0, len(energy_data), 2 * int(np.ceil(correlation_time))):
        magnet_samples.append(magnetization_data[t])
        energy_samples.append(energy_data[t])
    magnet_samples = np.asarray(magnet_samples)
    energy_samples = np.asarray(energy_samples)
    abs_magnetization = np.mean(np.absolute(magnet_samples))
    abs_magnetization_error = calculate_error(magnet_samples)
    print("<m> (<|M|/N>) = {0} +/- {1}".format(abs_magnetization, abs_magnetization_error))
    magnetization = np.mean(magnet_samples)
    magnetization_error = calculate_error(magnet_samples)
    print("<M/N> = {0} +/- {1}".format(magnetization, magnetization_error))
    energy = np.mean(energy_samples)
    energy_error = calculate_error(energy_samples)
    print("<E/N> = {0} +/- {1}".format(energy, energy_error))
    magnetization_squared = np.mean((magnet_samples * no_of_sites)**2)
    magnetization_squared_error = calculate_error((magnet_samples * no_of_sites)**2)
    print("<M^2> = {0} +/- {1}".format(magnetization_squared, magnetization_squared_error))

Author: teunzwart, Project: bachelor-project, Lines: 27, Source: autocorrelation.py
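Since calculate_error is not shown here, the sketch below substitutes the standard error of the mean of the thinned samples, a common choice once samples are about two correlation times apart (both the series and the correlation time are made up):

import numpy as np

series = np.cumsum(np.random.normal(size=10000)) * 0.01  # correlated toy series
tau = 25                                                 # assumed correlation time
samples = series[::2 * tau]                              # thin every 2*tau steps
mean = np.mean(samples)
stderr = np.std(samples, ddof=1) / np.sqrt(len(samples))
print("estimate = {0} +/- {1}".format(mean, stderr))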
Example 8: modulate_image

def modulate_image(gabor_def,
                   visuals,
                   spacials,
                   position,
                   min_contrast=0.0,
                   frequency_data=None,
                   use_local_rms=False):
    (pixels_per_degree, gabor_diameter, xf, yf, gaussian, ramp, grating, g) = frequency_data if isinstance(frequency_data, FREQ_DATA) else load_spacial_data(visuals, spacials)
    import time
    st = time.time()
    half = int(gabor_diameter / 2.0)  # integer corner so the slices below are valid
    top_left_pos = (position[0] - half, position[1] - half)
    patch = gabor_def.rms_matrix[top_left_pos[0] : top_left_pos[0] + gabor_diameter, top_left_pos[1] : top_left_pos[1] + gabor_diameter, :]
    if use_local_rms:
        patch_avg = gabor_def.avg_matrix[top_left_pos[0] : top_left_pos[0] + gabor_diameter, top_left_pos[1] : top_left_pos[1] + gabor_diameter]
        R = (patch_avg / 127.0) - 1
        R = R / (numpy.max(numpy.abs(R))) / 2.0
        rms_measure = numpy.std(R + 0.5) / numpy.mean(R + 0.5)
        print(rms_measure)
        if min_contrast > 0:
            rms_measure = max(rms_measure, min_contrast)
        g = g * (255.0 * rms_measure)
    else:
        g = g * (255.0 * gabor_def.rms_measure)
    g = g - numpy.mean(g)
    gabor = numpy.transpose(numpy.tile(g, (3, 1, 1)), (1, 2, 0))
    print("took {0}".format((time.time() - st) * 1000.0))
    return GABOR_DATA._make([top_left_pos, gabor_diameter, gabor_diameter / 2.0, patch, numpy.clip(patch + gabor, 0, 255).astype('uint8')])

Author: kedean, Project: gabor_generator, Lines: 32, Source: gabor_util.py
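The local contrast measure is a coefficient of variation: numpy.std divided by numpy.mean of the rescaled luminance. Reduced to its essentials, with a made-up patch:

import numpy

patch = numpy.random.uniform(0.3, 0.7, size=(64, 64))  # toy luminance patch in [0, 1]
rms_measure = numpy.std(patch) / numpy.mean(patch)
print(rms_measure)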
Example 9: update

def update(self, y):
    L = Loss().MSE(self.output, y)
    # stopping criteria
    self.errors[self.epoch % 5] = numpy.mean(L.E**2)**0.5
    score = numpy.mean(self.errors)
    # stop when error starts to diverge too much
    print(" ", self.bestScore)
    self.stop = score/self.bestScore > 1e60
    # save the best weights
    if score < self.bestScore:
        self.bestW = self.W
        self.bestScore = score
        self.bestEpoch = self.epoch
    norm_W = numpy.linalg.norm(self.W)
    sys.stdout.write("\rEpoch %d: RMSE: %2.3f, Norm(W): %2.2f" % (self.epoch, numpy.mean((y - self.output)**2)**0.5, norm_W))
    sys.stdout.flush()
    # gradients
    grad_outputs = L.dE_dY*(1 - self.output**2)
    dE_dK = numpy.dot(self.hidden.reshape(self.n_hidden, 1), grad_outputs.reshape(1, self.n_output))
    transfer = numpy.dot(grad_outputs, self.K.T)
    # hidden layer
    grad_hidden = transfer * (1 - self.hidden**2)
    dE_dW = numpy.dot(self.X.T, grad_hidden)
    # updating weights
    self.K -= 1.2*self.alpha*dE_dK
    self.W -= self.alpha*dE_dW

Author: 2php, Project: DenoisingAutoEncoder, Lines: 35, Source: dA.py
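The RMSE shown in the progress line is just numpy.mean of squared residuals under a square root. By itself:

import numpy

y_true = numpy.array([1.0, 2.0, 3.0])
y_pred = numpy.array([1.1, 1.9, 3.2])
rmse = numpy.mean((y_true - y_pred) ** 2) ** 0.5
print(rmse)  # ~0.141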
Example 10: testPdfOfSampleMultiDims

def testPdfOfSampleMultiDims(self):
    student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
    self.assertAllEqual([], student.event_shape)
    self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
    self.assertAllEqual([2, 2], student.batch_shape)
    self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
    num = 50000
    samples = student.sample(num, seed=123456)
    pdfs = student.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual(samples.get_shape(), (num, 2, 2))
    self.assertEqual(pdfs.get_shape(), (num, 2, 2))
    self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=.03)
    self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=.03)
    self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
    if not stats:
        return
    self.assertNear(
        stats.t.var(7., loc=0., scale=3.),  # loc does not affect var
        np.var(sample_vals[:, :, 0]),
        err=.4)
    self.assertNear(
        stats.t.var(11., loc=0., scale=3.),  # loc does not affect var
        np.var(sample_vals[:, :, 1]),
        err=.4)

Author: LiuCKind, Project: tensorflow, Lines: 28, Source: student_t_test.py
Example 11: testEpsilon_MOEA_NegativeDTLZ2

def testEpsilon_MOEA_NegativeDTLZ2(self):
    random = pyotl.utility.Random(1)
    problemGen = lambda: pyotl.problem.real.NegativeDTLZ2(3)
    problem = problemGen()
    pathProblem = os.path.join(self.pathData, type(problem).__name__.replace('Negative', ''), str(problem.GetNumberOfObjectives()))
    crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
    mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
    epsilon = pyotl.utility.PyList2Vector_Real([0.06] * problem.GetNumberOfObjectives())
    pfList = []
    for _ in range(self.repeat):
        problem = problemGen()
        initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
        optimizer = pyotl.optimizer.couple_couple.real.Epsilon_MOEA(random, problem, initial, crossover, mutation, epsilon)
        while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
            optimizer()
        pf = pyotl.utility.PyListList2VectorVector_Real(
            [list(solution.objective_) for solution in optimizer.GetSolutionSet()])
        for objective in pf:
            problem.Fix(objective)
        pfList.append(pf)
    pathCrossover = os.path.join(pathProblem, type(crossover).__name__)
    pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
    pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
    # GD
    indicator = pyotl.indicator.real.DTLZ2GD()
    metricList = [indicator(pf) for pf in pfList]
    rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
    self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
    # IGD
    indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
    metricList = [indicator(pf) for pf in pfList]
    rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
    self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])

Author: O-T-L, Project: PyOTL, Lines: 33, Source: epsilon_moea.py
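The assertions compare stored and freshly computed indicator values with a two-sample t-test, reporting both means on failure. A self-contained sketch of that regression-test pattern with synthetic metric lists:

import numpy
import scipy.stats

stored = numpy.random.normal(0.05, 0.01, size=30)   # metrics saved earlier
current = numpy.random.normal(0.05, 0.01, size=30)  # metrics from this run
p_value = scipy.stats.ttest_ind(stored, current)[1]
print(p_value > 0.05, numpy.mean(stored), numpy.mean(current))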
Example 12: SB_MotifTwo

def SB_MotifTwo(y, binarizeHow='diff'):
    """
    Looks at local motifs in a binary symbolization of the time series, which is
    performed by a given binarization method.

    Arguments
    ---------
    y: a nitime time-series object, or numpy vector
    """
    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))
    # Binarize on incremental differences:
    if binarizeHow == 'diff':
        yBin = ((np.sign(np.diff(y))) + 1.) / 2.
    else:
        raise ValueError(binarizeHow)
    # Initialize output dictionary
    out = {}
    # Where the difference is 0 or 1
    r0 = yBin == 0
    r1 = yBin == 1
    out['u'] = np.mean(r1)
    out['d'] = np.mean(r0)
    out['h'] = -(out['u']*np.log2(out['u']) + out['d']*np.log2(out['d']))
    return out

Author: jamesmccormac, Project: hctsa_python, Lines: 34, Source: tsStats.py
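A detail worth noting: np.mean of a boolean array is the fraction of True entries, which is how out['u'] and out['d'] become probabilities. For example:

import numpy as np

y = np.array([0.1, 0.3, 0.2, 0.5, 0.4])
yBin = (np.sign(np.diff(y)) + 1.) / 2.         # 1 for increases, 0 for decreases
print(np.mean(yBin == 1), np.mean(yBin == 0))  # 0.5 0.5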
Example 13: EN_CID

def EN_CID(y):
    """
    CID measure from Batista, G. E. A. P. A., Keogh, E. J., Tataw, O. M. & de
    Souza, V. M. A. CID: an efficient complexity-invariant distance for time
    series. Data Min Knowl. Disc. 28, 634-669 (2014).

    Arguments
    ---------
    y: a nitime time-series object, or numpy vector
    """
    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))
    # Prepare the output dictionary
    out = {}
    # Original definition (in Table 2 of the paper cited above)
    out['CE1'] = np.sqrt(np.mean(np.power(np.diff(y), 2)))  # sum -> mean to deal with non-equal time-series lengths
    # Definition corresponding to the line segment example in Fig. 9 of the
    # paper cited above (using Pythagoras's theorem):
    out['CE2'] = np.mean(np.sqrt(1 + np.power(np.diff(y), 2)))
    return out

Author: jamesmccormac, Project: hctsa_python, Lines: 27, Source: tsStats.py
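CE1 is the root of the mean squared successive difference, so smoother series score lower. A quick check on a sine wave:

import numpy as np

y = np.sin(np.linspace(0, 4 * np.pi, 200))
ce1 = np.sqrt(np.mean(np.diff(y) ** 2))
ce2 = np.mean(np.sqrt(1 + np.diff(y) ** 2))
print(ce1, ce2)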
Example 14: softmax_experiment

def softmax_experiment():
    """Run softmax experiment."""
    print('Running softmax experiment.')
    taus = [0.01, 0.1, 1]
    ars, pos = [], []
    for tau in taus:
        ar, po = run_experiment(2000, 1000, tau=tau, alpha=0.1)
        ars.append(np.mean(ar, 0))
        pos.append(np.mean(po, 0))
    # plot the results
    plt.close('all')
    f, (ax1, ax2) = plt.subplots(2)
    for i, tau in enumerate(taus):
        ax1.plot(ars[i].T, label='$\\tau$ = %.2f' % tau)
        ax2.plot(pos[i].T, label='$\\tau$ = %.2f' % tau)
    ax1.legend(loc='lower right')
    ax1.set_ylabel('Average reward')
    ax1.set_xlim(xmin=-10)
    ax2.legend(loc='lower right')
    ax2.set_xlabel('Plays')
    ax2.set_ylabel('% Optimal action')
    ax2.set_xlim(xmin=-20)
    plt.savefig('softmax_experiment.pdf')
    plt.show()

Author: kokorotakey, Project: bandit, Lines: 26, Source: bandit.py
Example 15: getClass

def getClass(imageWindow, models, z):
    hasLabel = False
    label = 999
    for k in models.keys():
        m = models[k]
        l1 = m[0]
        l2 = m[1]
        l3 = m[2]
        h1 = m[3]
        h2 = m[4]
        h3 = m[5]
        ch1 = numpy.mean(imageWindow[:, :, 0])
        ch2 = numpy.mean(imageWindow[:, :, 1])
        ch3 = numpy.mean(imageWindow[:, :, 2])
        # print("checking if ", ch1, ch2, ch3, " is between ", h1, l1, h2, l2, h3, l3)
        if l1 < ch1 < h1 and l2 < ch2 < h2 and l3 < ch3 < h3:
            if not hasLabel:
                label = k
                print("got label ", z[k])
                hasLabel = True
            else:
                print("error, relabeling as :", z[k])
                return 999
    if not hasLabel:
        return 999
    else:
        return label

Author: aplassard, Project: Image_Processing, Lines: 29, Source: simpleClassify.py
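Each numpy.mean call above collapses one color channel of the window to a scalar; the same result comes from a single axis-wise call. A sketch with a made-up window:

import numpy

window = numpy.random.randint(0, 256, size=(32, 32, 3))  # toy RGB window
ch1, ch2, ch3 = numpy.mean(window, axis=(0, 1))          # one mean per channel
print(ch1, ch2, ch3)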
Example 16: test_decimate

def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.
    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)
    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]
    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)
    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)

Author: HSMin, Project: mne-python, Lines: 29, Source: test_kit.py
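np.mean(points, axis=0) gives the centroid of a point cloud, and the mean distance from it gives an effective radius. The pattern in isolation, with random points:

import numpy as np

points = np.random.normal(size=(1000, 3))  # toy head-shape points
centroid = np.mean(points, axis=0)
dist = np.sqrt(np.sum((points - centroid) ** 2, axis=1))
print(np.mean(dist))                       # mean distance from center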
Example 17: summarize_features_mfcc

def summarize_features_mfcc(mfccs, v=False):
    """
    Given an MFCC matrix, return a summary feature vector for a window.

    :param mfccs: NxM matrix
        mfcc matrix
    :param v: bool
        verbose flag (unused)
    :return: 1xL array
        feature vector
    """
    # Summarize features
    features = np.max(mfccs, axis=1)
    features = np.append(features, np.mean(mfccs, axis=1))
    features = np.append(features, np.std(mfccs, axis=1))
    d_mfccs = np.diff(mfccs, axis=1)
    features = np.append(features, np.mean(d_mfccs, axis=1))
    features = np.append(features, np.std(d_mfccs, axis=1))
    d_d_mfccs = np.diff(d_mfccs, axis=1)
    features = np.append(features, np.mean(d_d_mfccs, axis=1))
    features = np.append(features, np.std(d_d_mfccs, axis=1))
    # print(np.shape(d_d_mfccs))
    # print(np.shape(features))
    return np.reshape(features, (1, len(features)))

Author: justinsalamon, Project: fconsetdetection, Lines: 27, Source: KNN.py
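The summary stacks axis-wise statistics (mean, std, and means of first and second differences) into one flat vector. A minimal version of that pattern, assuming a made-up 13 x 100 MFCC matrix:

import numpy as np

mfccs = np.random.normal(size=(13, 100))      # 13 coefficients x 100 frames
features = np.concatenate([
    np.mean(mfccs, axis=1),                   # per-coefficient mean
    np.std(mfccs, axis=1),                    # per-coefficient spread
    np.mean(np.diff(mfccs, axis=1), axis=1),  # mean delta
])
print(features.shape)  # (39,)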
Example 18: svm_SVR_C

def svm_SVR_C(xM, yV, c_l, graph=True):
    """
    SVR is performed iteratively with different C values
    until all C in the list are used.
    """
    r2_l, sd_l = [], []
    for C in c_l:
        print('sklearn.svm.SVR(C={})'.format(C))
        clf = svm.SVR(C=C)
        clf.fit(xM, yV.A1)
        yV_pred = clf.predict(xM)
        r2, sd = regress_show(yV, np.mat(yV_pred).T, graph=graph)
        for X, x in [[r2_l, r2], [sd_l, sd]]:
            X.append(x)
    print('average r2, sd are', np.mean(r2_l), np.mean(sd_l))
    if graph:
        pdw = pd.DataFrame({'log10(C)': np.log10(c_l), 'r2': r2_l, 'sd': sd_l})
        pdw.plot(x='log10(C)')
    return r2_l, sd_l

Author: jskDr, Project: jamespy_py3, Lines: 25, Source: jutil.py
Example 19: run_svm_evaluation

def run_svm_evaluation(self, svmtype, inputdata, outputdata, k):
    """Run SVM on training data to evaluate classifier. Return f1scores, gamma and C."""
    if svmtype == 'rbf':
        # Parameter grid
        param_grid = [
            {'C': np.logspace(1, 5, 5), 'gamma': np.logspace(-3, 0, 5), 'kernel': ['rbf']}
        ]
    if svmtype == 'ln':
        param_grid = [{'C': np.logspace(1, 5, 5)}]
    score_func = metrics.f1_score
    # Cross validation
    cv = cross_validation.KFold(inputdata.shape[0], n_folds=k, indices=True, shuffle=True)
    f1_scores = []
    for traincv, testcv in cv:
        # TODO: multithreading of cross validation.
        (f1_score, gamma1, c) = self.do_cross_validation(param_grid, svmtype, score_func, inputdata[traincv], outputdata[traincv], inputdata[testcv], outputdata[testcv])
        f1_scores.append(f1_score)
    print("score average: " + str(np.mean(f1_scores)))
    print(f1_scores)
    average_score = np.mean(f1_scores)
    tuples = (average_score, f1_scores)
    return (tuples, gamma1, c)

Author: sagieske, Project: scriptie, Lines: 30, Source: start_svm.py
Example 20: run_epoch

def run_epoch(self, session, input_data, input_labels,
              shuffle=True, verbose=True):
    orig_X, orig_y = input_data, input_labels
    dp = self.config.dropout
    # We're interested in keeping track of the loss and accuracy during training
    total_loss = []
    total_correct_examples = 0
    total_processed_examples = 0
    total_steps = len(orig_X) // self.config.batch_size
    for step, (x, y) in enumerate(
            data_iterator(orig_X, orig_y, batch_size=self.config.batch_size,
                          label_size=self.config.label_size, shuffle=shuffle)):
        feed = self.create_feed_dict(input_batch=x, dropout=dp, label_batch=y)
        loss, total_correct, _ = session.run(
            [self.loss, self.correct_predictions, self.train_op],
            feed_dict=feed)
        total_processed_examples += len(x)
        total_correct_examples += total_correct
        total_loss.append(loss)
        if verbose and step % verbose == 0:
            sys.stdout.write('\r{} / {} : loss = {}'.format(
                step, total_steps, np.mean(total_loss)))
            sys.stdout.flush()
    if verbose:
        sys.stdout.write('\r')
        sys.stdout.flush()
    return np.mean(total_loss), total_correct_examples / float(total_processed_examples)

Author: tracholar, Project: cs224d, Lines: 28, Source: q2_NER.py
Note: the numpy.mean examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.