This article collects typical usage examples of the sklearn.discriminant_analysis.LinearDiscriminantAnalysis class in Python. If you are unsure what LinearDiscriminantAnalysis is for or how to use it, the curated class examples below may help.
Twenty code examples of the LinearDiscriminantAnalysis class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
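Before diving into the examples, here is a minimal orientation sketch of the usual fit/predict/transform workflow. It uses the bundled iris dataset; the variable names are illustrative and not taken from any example below:

from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)
# LDA keeps at most min(n_classes - 1, n_features) components; for iris that is 2
lda = LinearDiscriminantAnalysis(n_components=2)
X_projected = lda.fit(X, y).transform(X)  # supervised projection onto 2 discriminants
print(lda.predict(X[:5]))                 # predicted class labels
print(lda.score(X, y))                    # mean accuracy on the training data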
Example 1: computing_cv_accuracy_LDA

def computing_cv_accuracy_LDA(in_path=None, cv_n_fold=10):
    # u65/u80 are discounted utilities for (possibly set-valued) predictions
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y ** 2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y ** 2

    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = np.array(data.iloc[:, -1].tolist())
    kf = KFold(n_splits=cv_n_fold, random_state=None, shuffle=True)
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    mean_u65, mean_u80 = 0, 0
    for idx_train, idx_test in kf.split(y):
        print("---k-FOLD-new-executing--")
        X_cv_train, y_cv_train = X[idx_train], y[idx_train]
        X_cv_test, y_cv_test = X[idx_test], y[idx_test]
        lda.fit(X_cv_train, y_cv_train)
        n_test = len(idx_test)
        sum_u65, sum_u80 = 0, 0
        for i, test in enumerate(X_cv_test):
            evaluate = lda.predict([test])
            print("-----TESTING-----", i)
            if y_cv_test[i] in evaluate:
                sum_u65 += u65(len(evaluate))
                sum_u80 += u80(len(evaluate))
        mean_u65 += sum_u65 / n_test
        mean_u80 += sum_u80 / n_test
    print("--->", mean_u65 / cv_n_fold, mean_u80 / cv_n_fold)

Author: sdestercke, Project: classifip, Lines: 32, Source: qdatest.py
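The u65 and u80 helpers come from the imprecise-classification literature: they reward a correct prediction less when it is spread over a larger candidate set. A quick sanity check of the formula used above:

def u65(mod_Y):
    return 1.6 / mod_Y - 0.6 / mod_Y ** 2

print(u65(1))  # 1.0  -> a correct singleton prediction earns full credit
print(u65(2))  # 0.65 -> a correct two-candidate prediction is discounted (hence "u65")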
Example 2: performLDA

def performLDA(data_to_fit, y, numComponent=None):
    # data_to_fit is laid out as features x samples; transpose to samples x features
    data_to_fit_np_t = np.array(data_to_fit).T
    if numComponent is None:
        numComponent = len(data_to_fit_np_t)
    lda_model = LinearDiscriminantAnalysis(n_components=numComponent)
    lda_results = lda_model.fit_transform(data_to_fit_np_t, y)
    return lda_model, lda_results

Author: anthonyho, Project: arrayAnalysisTools, Lines: 7, Source: CN_analysislib.py
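A minimal usage sketch, assuming the feature-per-row input layout implied by the transpose above (the synthetic data and names are illustrative):

import numpy as np

rng = np.random.default_rng(0)
data_to_fit = rng.normal(size=(4, 30))  # 4 features x 30 samples
y = np.repeat([0, 1, 2], 10)            # one label per sample
model, projected = performLDA(data_to_fit, y, numComponent=2)
print(projected.shape)                  # (30, 2)

Note that the default numComponent here (the number of samples) exceeds LDA's limit of min(n_classes - 1, n_features) and raises an error on recent scikit-learn versions, so passing an explicit value is safer.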
Example 3: computing_performance_LDA
def computing_performance_LDA(in_path=None, seeds=list([0])):
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y ** 2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y ** 2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].tolist()
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    mean_u65, mean_u80 = 0, 0
    n_times = len(seeds)
    for k in range(0, n_times):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=seeds[k])
        sum_u65, sum_u80 = 0, 0
        lda.fit(X_train, y_train)
        n, _ = X_test.shape
        for i, test in enumerate(X_test):
            evaluate = lda.predict([test])
            print("-----TESTING-----", i)
            if y_test[i] in evaluate:
                sum_u65 += u65(len(evaluate))
                sum_u80 += u80(len(evaluate))
        print("--k-->", k, sum_u65 / n, sum_u80 / n)
        mean_u65 += sum_u65 / n
        mean_u80 += sum_u80 / n
    print("--->", mean_u65 / n_times, mean_u80 / n_times)
Author: sdestercke, Project: classifip, Lines: 29, Source: qdatest.py
Example 4: test

def test(self):
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    target_names = iris.target_names
    pca = PCA(n_components=3)
    X_r = pca.fit(X).transform(X)
    # LDA yields at most n_classes - 1 = 2 discriminants on iris; recent
    # scikit-learn raises an error for n_components=3, so request 2
    lda = LinearDiscriminantAnalysis(n_components=2)
    X_r2 = lda.fit(X, y).transform(X)
    # Percentage of variance explained by each component
    print('explained variance ratio (first three components): %s'
          % str(pca.explained_variance_ratio_))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
        ax.scatter(X_r[y == i, 0], X_r[y == i, 1], zs=X[y == i, 2], c=c, label=target_name)
    plt.legend()
    plt.title('PCA of IRIS dataset')
    fig2 = plt.figure()
    ax = fig2.add_subplot(111, projection='3d')
    for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
        ax.scatter(X_r2[y == i, 0], X_r2[y == i, 1], zs=X[y == i, 2], c=c, label=target_name)
    plt.legend()
    plt.title('LDA of IRIS dataset')
    plt.show()

Author: aadah, Project: ml_proj, Lines: 32, Source: visualize.py
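Like PCA, a fitted LDA model exposes explained_variance_ratio_ (for the 'svd' and 'eigen' solvers), so the example above could report variance for the discriminants as well. A short sketch:

from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)
lda = LinearDiscriminantAnalysis(n_components=2).fit(X, y)
print(lda.explained_variance_ratio_)  # between-class variance ratio per discriminant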
Example 5: LinearDiscriminantAnalysiscls

class LinearDiscriminantAnalysiscls(object):
    """Thin wrapper around LinearDiscriminantAnalysis that logs exceptions."""

    def __init__(self):
        self.lda_cls = LinearDiscriminantAnalysis()
        self.prediction = None
        self.train_x = None
        self.train_y = None

    def train_model(self, train_x, train_y):
        try:
            self.train_x = train_x
            self.train_y = train_y
            self.lda_cls.fit(train_x, train_y)
        except Exception:
            print(traceback.format_exc())

    def predict(self, test_x):
        try:
            self.test_x = test_x
            self.prediction = self.lda_cls.predict(test_x)
            return self.prediction
        except Exception:
            print(traceback.format_exc())

    def accuracy_score(self, test_y):
        try:
            # return r2_score(test_y, self.prediction)
            return self.lda_cls.score(self.test_x, test_y)
        except Exception:
            print(traceback.format_exc())

Author: obaid22192, Project: machine-learning, Lines: 30, Source: classifiers.py
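A usage sketch for the wrapper, assuming pre-split train/test arrays (the iris split here is illustrative):

import traceback
from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(*load_iris(return_X_y=True), random_state=0)
clf = LinearDiscriminantAnalysiscls()
clf.train_model(X_train, y_train)
print(clf.predict(X_test)[:5])
print(clf.accuracy_score(y_test))  # delegates to lda_cls.score on the stored test_x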
Example 6: main

def main():
    """Read train/test data, evaluate a classifier, and write a submission."""
    df = pd.read_csv("train.csv")
    # train/test split using stratified sampling
    labels = df['label']
    df = df.drop(['label'], axis=1)
    # note: the original used the deprecated sklearn.cross_validation API;
    # this is the equivalent sklearn.model_selection form
    sss = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=23)
    for train_index, test_index in sss.split(df.values, labels):
        x_train, x_test = df.values[train_index], df.values[test_index]
        y_train, y_test = labels.iloc[train_index], labels.iloc[test_index]
    # classification algorithm
    classification(x_train, y_train, x_test, y_test)
    # Predict the test set
    favorite_clf = LinearDiscriminantAnalysis()
    favorite_clf.fit(x_train, y_train)
    test = pd.read_csv('test.csv')
    test_predictions = favorite_clf.predict(test)
    print(test_predictions)
    # Format the submission DataFrame
    submission = pd.DataFrame(test_predictions, columns=['Label'])
    submission.insert(0, 'ImageId', np.arange(len(test_predictions)) + 1)
    # Export the submission
    submission.to_csv('submission.csv', index=False)
    print(submission.tail())

Author: ishmnnit, Project: Kaggle, Lines: 32, Source: digit.py
Example 7: plot_lda

def plot_lda(features, labels):
    """
    Input
        features: features to reduce with LDA and plot
        labels: labels of features
    Description
        plots the LDA projection of the features
    """
    lda = LinearDiscriminantAnalysis(n_components=2)
    # note: the original fit `chroma[0], chroma[1]` from an enclosing scope;
    # the function's own arguments are used here instead
    new_features = lda.fit(features, labels).transform(features)
    colors = list("rgbykrgbyk")
    markers = list("xxxxxooooo")
    # genres and num_songs are module-level globals in the source project
    plt.figure(len(genres))  # figure for all genres together
    for i, genre in enumerate(genres):
        plt.figure(i)  # figure for one particular genre
        plt.scatter(new_features[i*num_songs:(i+1)*num_songs, 0],
                    new_features[i*num_songs:(i+1)*num_songs, 1],
                    c=colors[i], marker=markers[i], label=genre)
        plt.title(genre)
        plt.figure(len(genres))  # switch back to the combined figure
        plt.scatter(new_features[i*num_songs:(i+1)*num_songs, 0],
                    new_features[i*num_songs:(i+1)*num_songs, 1],
                    c=colors[i], marker=markers[i], label=genre)
    plt.legend()
    plt.title('LDA')
    plt.show()

Author: hyunwooj, Project: unm-cs429, Lines: 29, Source: run.py
Example 8: LinearDiscriminantAnalysisPredictor

class LinearDiscriminantAnalysisPredictor(PredictorBase):
    '''
    Linear Discriminant Analysis
    '''

    def __init__(self, animal_type):
        self.animal_type = animal_type
        self.clf = LinearDiscriminantAnalysis()

    def fit(self, X_train, y_train):
        self.clf.fit(X_train, y_train)

    def predict(self, X_test):
        predictions = self.clf.predict_proba(X_test)
        predictions_df = self.bundle_predictions(predictions)
        return predictions_df

    def find_best_params(self):
        parameters = {'solver': ['svd', 'lsqr', 'eigen']}
        lda = LinearDiscriminantAnalysis()  # originally (mis)named `knn`
        # grid_search.GridSearchCV is the deprecated location; on recent
        # versions use sklearn.model_selection.GridSearchCV
        clf = grid_search.GridSearchCV(lda, parameters)
        train_data = get_data('../data/train.csv')
        train_data = select_features(train_data, self.animal_type)
        X = train_data.drop(['OutcomeType'], axis=1)
        y = train_data['OutcomeType']
        clf.fit(X, y)
        print(clf.best_params_)

Author: paul-reiners, Project: kaggle-shelter-animal-outcomes, Lines: 28, Source: linear_descriminant_analysis_predictor.py
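If the search should also cover shrinkage, note that shrinkage is only supported by the 'lsqr' and 'eigen' solvers; 'svd' raises NotImplementedError (example 10 below tests exactly this). A sketch of a combined grid:

from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

param_grid = [
    {'solver': ['svd']},  # svd does not accept shrinkage
    {'solver': ['lsqr', 'eigen'], 'shrinkage': [None, 'auto', 0.1, 0.5]},
]
search = GridSearchCV(LinearDiscriminantAnalysis(), param_grid, cv=5)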
Example 9: test_lda_orthogonality
def test_lda_orthogonality():
    # arrange four classes with their means in a kite-shaped pattern
    # the longer distance should be transformed to the first component, and
    # the shorter distance to the second component.
    means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])

    # We construct perfectly symmetric distributions, so the LDA can estimate
    # precise means.
    scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
                        [0, 0, 0.1], [0, 0, -0.1]])
    X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
    y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])

    # Fit LDA and transform the means
    clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
    means_transformed = clf.transform(means)

    d1 = means_transformed[3] - means_transformed[0]
    d2 = means_transformed[2] - means_transformed[1]
    d1 /= np.sqrt(np.sum(d1 ** 2))
    d2 /= np.sqrt(np.sum(d2 ** 2))

    # the transformed within-class covariance should be the identity matrix
    assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))

    # the means of classes 0 and 3 should lie on the first component
    assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)

    # the means of classes 1 and 2 should lie on the second component
    assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
Author: aniryou, Project: scikit-learn, Lines: 31, Source: test_discriminant_analysis.py
Example 10: test_lda_predict
def test_lda_predict():
    # Test LDA classification.
    # This checks that LDA implements fit and predict and returns correct
    # values for simple toy data.
    for test_case in solver_shrinkage:
        solver, shrinkage = test_case
        clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
        y_pred = clf.fit(X, y).predict(X)
        assert_array_equal(y_pred, y, "solver %s" % solver)

        # Assert that it works with 1D data
        y_pred1 = clf.fit(X1, y).predict(X1)
        assert_array_equal(y_pred1, y, "solver %s" % solver)

        # Test probability estimates
        y_proba_pred1 = clf.predict_proba(X1)
        assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y, "solver %s" % solver)
        y_log_proba_pred1 = clf.predict_log_proba(X1)
        assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8, "solver %s" % solver)

        # Primarily test for commit 2f34950 -- "reuse" of priors
        y_pred3 = clf.fit(X, y3).predict(X)
        # LDA shouldn't be able to separate those
        assert_true(np.any(y_pred3 != y3), "solver %s" % solver)

    # Test invalid shrinkages
    clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
    assert_raises(ValueError, clf.fit, X, y)
    clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
    assert_raises(ValueError, clf.fit, X, y)
    clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
    assert_raises(NotImplementedError, clf.fit, X, y)
    # Test unknown solver
    clf = LinearDiscriminantAnalysis(solver="dummy")
    assert_raises(ValueError, clf.fit, X, y)
Author: nelson-liu, Project: scikit-learn, Lines: 35, Source: test_discriminant_analysis.py
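As the invalid-shrinkage checks above imply, valid shrinkage values are None, 'auto', or a float in [0, 1], and only with the 'lsqr' or 'eigen' solvers. A short sketch of valid usage on synthetic data:

from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = make_classification(n_samples=100, n_features=20, random_state=0)
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto")  # Ledoit-Wolf estimate
print(clf.fit(X, y).score(X, y))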
Example 11: tuneSpatialFilters

def tuneSpatialFilters(self):
    print(colors.MAGENTA)
    num_total_spatial_filters = self.all_spatial_filters.shape[0]
    best_mean = 0
    best_num = 0
    best_score = None
    for i in range(num_total_spatial_filters):
        num_filters_to_try = i + 1
        print("trying with first", num_filters_to_try, "spatial filters")
        trial_X = self.extractFeatures(self.epochs, self.all_spatial_filters[:num_filters_to_try])
        lda = LinearDiscriminantAnalysis()
        lda = lda.fit(trial_X, self.y)
        cross_validation_folds = 10
        xval = cross_val_score(lda, trial_X, self.y, cv=cross_validation_folds)
        # print(xval)
        this_mean = xval.mean()
        print("mean", this_mean)
        if this_mean > best_mean:
            best_mean = this_mean
            best_num = num_filters_to_try
            best_score = xval
    print("-----------------------------")
    print("best mean was", best_mean, "with", best_num, "filters used")
    print(best_score)
    print(colors.ENDC)

Author: octopicorn, Project: bcikit, Lines: 31, Source: CSP.py
Example 12: feature_distribute_4_projection

def feature_distribute_4_projection(channel_length=4, projection='pca'):
    '''2D projection (PCA or LDA) of the four-feature combination for the
    six gestures, over 34 channels.'''
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
    markers = ['o', '+', 'v', '^', '*', 'x']
    sample_len = 100
    subjects = ['subject_' + str(i + 1) for i in range(2)]  # test subjects
    # subjects = ['subject_1']
    for subject in subjects:
        title_pre = subject + '_feature_class_'
        channel_num = 34  # channels
        # channel_num = 18
        # channel_num = 1
        for channel in range(channel_num):
            feature_list = ['MAV', 'ZC', 'SSC', 'WL']
            # feature_list = ['MAV']
            actions = [i + 1 for i in range(6)]  # gestures
            # actions = [1, 2]
            fig = plt.figure(figsize=(8, 6))
            ax = fig.add_subplot()
            trains = np.array([])
            targets = np.array([], int)  # np.int is removed in recent NumPy
            for action in actions:
                filename = title_pre + str(action)
                feature = np.load(
                    root_path + '/train1_250_100/' + filename + '.npy')
                train = feature[:sample_len, channel * channel_length : channel * channel_length + 4]
                target = np.ones(train.shape[0], int) * action
                trains = np.concatenate((trains, train), axis=None)
                targets = np.concatenate((targets, target), axis=None)
            trains = trains.reshape((-1, 4))
            if projection == 'pca':
                pca = PCA(n_components=2)
                X_r = pca.fit(trains).transform(trains)
            elif projection == 'lda':
                lda = LinearDiscriminantAnalysis(n_components=2)
                X_r = lda.fit(trains, targets).transform(trains)
            for action in actions:
                # note: the original indexed markers[action % 1], which is always
                # markers[0]; action - 1 gives each gesture its own marker
                plt.scatter(X_r[targets == action, 0], X_r[targets == action, 1],
                            c=colors[action], marker=markers[action - 1],
                            alpha=0.5, label=action)
            plt.legend()
            plt.title(subject + '-channel_' + str(channel) + '-' + projection + '-TD4')
            # plt.show()
            plt.savefig(
                'result/figure/distribute4_proj/' + subject + '-channel_'
                + str(channel) + '-' + projection + '-TD4',
                dpi=120)
            plt.close()

Author: fanzhe328, Project: EMG_Exp_noise_simu, Lines: 59, Source: feature_analyse.py
Example 13: plot_lda_only

def plot_lda_only(filename, title, filename_fig):
    df = pd.read_csv(path + filename, names=['x1', 'x2', 'y'], header=None)
    fig = plt.figure()
    fig.suptitle(title, fontsize=20)
    columns_ls = []
    for column in df.columns:
        columns_ls.append(column)
    X = df[columns_ls[0:len(columns_ls) - 1]].values
    Y = df[columns_ls[len(columns_ls) - 1]].values
    clf_lda = LinearDiscriminantAnalysis()
    clf_lda.fit(X, Y)
    # decision boundary: w0*x1 + w1*x2 + b = 0  =>  x2 = -(w0/w1)*x1 - b/w1
    w = clf_lda.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(-12, 34)
    yy = a * xx - clf_lda.intercept_[0] / w[1]
    plt.plot(xx, yy, color="blue", label="LDA decision boundary")
    print("Weights W0 %.2f and W1 %.2f" % (w[0], w[1]))
    plt.text(0, 0, "Y=+1", fontsize=12)
    plt.text(10, -20, "Y=-1", fontsize=12)
    # (commented-out code in the original also plotted an SVM boundary, a
    # LogisticRegression boundary fit the same way, and the support vectors)
    plt.xlabel('X1', fontsize=18)
    plt.ylabel('X2', fontsize=16)
    # fig.savefig(filename_fig)
    plt.scatter(X[:, 0], X[:, 1], c=Y)
    plt.axis('tight')
    plt.legend()
    plt.show()

Author: vswetha01, Project: PythonCode, Lines: 59, Source: svm_hw.py
Example 14: Train
def Train(enhancedGeneSet, classLabels):
    enhancedGeneSet = np.array(enhancedGeneSet)
    classLabels = np.array(classLabels)
    classifier = LinearDiscriminantAnalysis()
    classifier.fit(enhancedGeneSet, classLabels)
    # del enhancedGeneSet
    # del classLabels
    return classifier
Author: JavedZahoor, Project: phd-thesis-II-mattia, Lines: 8, Source: Supervised_LDA.py
Example 15: lda

def lda(X, y, n):
    '''
    Returns the optimal LDA projection of the data
    with n components.
    '''
    selector = LinearDiscriminantAnalysis(n_components=n)
    selector.fit(X, y)
    return selector.transform(X), y

Author: rhngit, Project: spam, Lines: 8, Source: helper.py
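A quick usage sketch of this helper on the iris data (illustrative only):

from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
X_reduced, y = lda(X, y, n=2)  # project 4 features onto the 2 discriminants
print(X_reduced.shape)         # (150, 2)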
Example 16: train_model

def train_model(self):
    ### Train spectrum data
    # form training data and labels
    X = np.empty((0, self.freq_cutoff), int)
    y = np.empty((0, 1), int)
    data_dir = 'clap_data/claps/spectrum/'
    for fname in os.listdir(data_dir):
        data = np.load("%s%s" % (data_dir, fname))
        X = np.append(X, data, axis=0)
        y = np.append(y, [1] * data.shape[0])
    data_dir = 'clap_data/noclaps/spectrum/'
    for fname in os.listdir(data_dir):
        data = np.load("%s%s" % (data_dir, fname))
        X = np.append(X, data, axis=0)
        y = np.append(y, [0] * data.shape[0])
    # fit the model (the original also kept commented-out PCA and
    # LogisticRegression variants)
    clf = LinearDiscriminantAnalysis()
    clf.fit(X, y)
    preds = clf.predict(X)
    print(preds)
    print(np.sum(preds), preds.size)
    # save model (binary mode; the original opened the file in text mode)
    pickle.dump(clf, open(clap_model_dir + clap_classifier_fname, 'wb'))
    self.clap_clf = clf

    ### Train decay data
    X = np.empty((0, self.decay_samples // 10), int)  # integer division for the shape
    data_dir = 'clap_data/claps/decay/'
    for fname in os.listdir(data_dir):
        if fname.endswith('npy'):
            data = np.load("%s%s" % (data_dir, fname))
            print(data.shape, X.shape)
            X = np.append(X, data, axis=0)
    print(X.shape)
    X_avg = np.mean(X, axis=0)
    plt.plot(X_avg)
    plt.show()
    # Average decay data
    np.save('%s%s' % (clap_model_dir, clap_decay_model_fname), X_avg)

Author: mzw4, Project: MorningAssistant, Lines: 58, Source: clap.py
Example 17: plot_lda

def plot_lda(X, y):
    colors = ['b', 'r']
    lda = LinearDiscriminantAnalysis(n_components=2)
    X_r = lda.fit(X, y).transform(X)
    plt.figure()
    for i, c in enumerate(colors):
        plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=str(i))
    plt.legend()
    plt.title('LDA')  # the original title said 'PCA', but this plots the LDA projection

Author: pvigier, Project: sa, Lines: 9, Source: util.py
Example 18: _get_lda
def _get_lda(self, data, variables):
    domain = Domain(attributes=variables, class_vars=data.domain.class_vars)
    data = data.transform(domain)
    lda = LinearDiscriminantAnalysis(solver='eigen', n_components=2)
    lda.fit(data.X, data.Y)
    scalings = lda.scalings_[:, :2].T
    if scalings.shape == (1, 1):
        scalings = np.array([[1.], [0.]])
    return scalings
Author: astaric, Project: orange3, Lines: 9, Source: owlinearprojection.py
Example 19: test_raises_value_error_on_same_number_of_classes_and_samples
def test_raises_value_error_on_same_number_of_classes_and_samples(solver):
    """
    Tests that if the number of samples equals the number
    of classes, a ValueError is raised.
    """
    X = np.array([[0.5, 0.6], [0.6, 0.5]])
    y = np.array(["a", "b"])
    clf = LinearDiscriminantAnalysis(solver=solver)
    with pytest.raises(ValueError, match="The number of samples must be more"):
        clf.fit(X, y)
Author: aniryou, Project: scikit-learn, Lines: 10, Source: test_discriminant_analysis.py
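The solver argument implies a pytest parametrization defined elsewhere in the test module; a self-contained equivalent might look like this (the decorator is an assumption, not part of the excerpt):

import numpy as np
import pytest
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

@pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"])
def test_raises_value_error_on_same_number_of_classes_and_samples(solver):
    X = np.array([[0.5, 0.6], [0.6, 0.5]])
    y = np.array(["a", "b"])
    clf = LinearDiscriminantAnalysis(solver=solver)
    with pytest.raises(ValueError, match="The number of samples must be more"):
        clf.fit(X, y)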
Example 20: self_tune

def self_tune(self, X, y, verbose=False):
    # fix random seed for reproducibility
    seed = 5
    np.random.seed(seed)
    # define k-fold cross-validation test harness
    # (the original used the deprecated StratifiedKFold(y=..., n_folds=...) API)
    kfold = StratifiedKFold(n_splits=self.tuning_csp_num_folds, shuffle=True, random_state=seed)
    # init scores
    cvscores = {}
    for i in range(1, self.num_spatial_filters):
        cvscores[i + 1] = 0
    for i, (train, test) in enumerate(kfold.split(X, y)):
        # calculate CSP spatial filters
        csp = CSP(n_components=self.num_spatial_filters)
        csp.fit(X[train], y[train])
        # try all filter counts, from the given number down to 2
        # (1 is too often found to be overfitting)
        for j in range(2, self.num_spatial_filters):
            num_filters_to_try = j
            # calculate spatial filters
            csp.n_components = num_filters_to_try
            # apply CSP filters to train data
            tuning_train_LDA_features = csp.transform(X[train])
            tuning_train_LDA_features = np.nan_to_num(tuning_train_LDA_features)  # returns a copy
            check_X_y(tuning_train_LDA_features, y[train])
            # apply CSP filters to test data
            tuning_test_LDA_features = csp.transform(X[test])
            tuning_test_LDA_features = np.nan_to_num(tuning_test_LDA_features)
            check_X_y(tuning_test_LDA_features, y[test])
            # train LDA and score it on the held-out fold
            lda = LinearDiscriminantAnalysis()
            prediction_score = lda.fit(tuning_train_LDA_features, y[train]).score(tuning_test_LDA_features, y[test])
            cvscores[num_filters_to_try] += prediction_score
            if verbose:
                print("prediction score", prediction_score, "with", num_filters_to_try, "spatial filters")
    best_num = max(cvscores, key=cvscores.get)
    best_score = cvscores[best_num] / (i + 1)  # parenthesized; the original's `/ i+1` divided before adding 1
    if verbose:
        print("best num filters:", best_num, "(average accuracy", best_score, ")")
        print("average scores per filter num:")
        for k in cvscores:
            print(k, ":", cvscores[k] / (i + 1))
    return [best_num, best_score]

Author: octopicorn, Project: bcikit, Lines: 54, Source: offline_analysis_grid.py
Note: The sklearn.discriminant_analysis.LinearDiscriminantAnalysis class examples in this article were compiled by vimsky (纯净天空) from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; refer to each project's license before redistributing or reusing the code. Do not repost without permission.