This article collects typical usage examples of the sklearn.metrics.get_scorer function in Python. If you are unsure what get_scorer does, how to call it, or what it looks like in real code, the curated examples below should help.
The following sections present 20 code examples of get_scorer, sorted by popularity by default. Each example is excerpted from an open-source project, with the author, project, and source file noted after the code.
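All of the examples rely on the same basic pattern: get_scorer(name) looks up a named metric and returns a scorer callable with the signature scorer(estimator, X, y). As a warm-up, here is a minimal, hedged sketch of that pattern; the dataset and estimator are illustrative choices and are not taken from the examples below.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer
from sklearn.model_selection import train_test_split

# Illustrative data and model (not from any example below).
X, y = make_classification(n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(max_iter=1000, random_state=0).fit(X_train, y_train)

# get_scorer(name) returns a callable with signature scorer(estimator, X, y).
acc = get_scorer('accuracy')(clf, X_test, y_test)
auc = get_scorer('roc_auc')(clf, X_test, y_test)
print(acc, auc)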
Example 1: test_thresholded_scorers
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    logscore = get_scorer('log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
Author: AlexanderFabisch | Project: scikit-learn | Lines: 35 | Source: test_score_objects.py
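Note: Example 2 is the same test from a later scikit-learn release. The substantive differences are the scorer name and the error checks: scikit-learn 0.18 renamed the loss-based scorers so that greater is always better, so the 'log_loss' scorer used above became 'neg_log_loss' (Example 20 exercises the deprecation warnings for the old names), and the multiclass check moved from assert_raises to pytest.raises with an explicit error-message match.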
Example 2: test_thresholded_scorers
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    with pytest.raises(ValueError, match="multiclass format is not supported"):
        get_scorer('roc_auc')(clf, X_test, y_test)

    # test error is raised with a single class present in model
    # (predict_proba shape is not suitable for binary auc)
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = DecisionTreeClassifier()
    clf.fit(X_train, np.zeros_like(y_train))
    with pytest.raises(ValueError, match="need classifier with two classes"):
        get_scorer('roc_auc')(clf, X_test, y_test)

    # for proba scorers
    with pytest.raises(ValueError, match="need classifier with two classes"):
        get_scorer('neg_log_loss')(clf, X_test, y_test)
Author: srinivasreddy | Project: scikit-learn | Lines: 49 | Source: test_score_objects.py
Example 3: clf_bias_var
def clf_bias_var(clf, X, y, n_replicas):
    roc_auc_scorer = get_scorer("roc_auc")
    # roc_auc_scorer(clf, X_test, y_test)
    auc_scores = []
    error_scores = []
    counts = np.zeros(X.shape[0], dtype=np.float64)
    sum_preds = np.zeros(X.shape[0], dtype=np.float64)
    for it in range(n_replicas):
        # generate train sets and test sets
        train_indices = np.random.randint(X.shape[0], size=X.shape[0])
        # get test sets
        in_train = np.unique(train_indices)
        mask = np.ones(X.shape[0], dtype=bool)
        mask[in_train] = False
        test_indices = np.arange(X.shape[0])[mask]

        clf.fit(X[train_indices], y[train_indices])

        auc_scores.append(roc_auc_scorer(clf, X[test_indices], y[test_indices]))
        error_scores.append(zero_one_loss(y[test_indices], clf.predict(X[test_indices])))

        preds = clf.predict(X)
        for index in test_indices:
            counts[index] += 1
            sum_preds[index] += preds[index]

    test_mask = (counts > 0)  # indices of samples that have been tested

    # print('counts mean: {}'.format(np.mean(counts)))
    # print('counts standard deviation: {}'.format(np.std(counts)))

    bias, var = bias_var(y[test_mask], sum_preds[test_mask], counts[test_mask], n_replicas)

    return auc_scores, error_scores, bias, var
Author: lidalei | Project: DataMining | Lines: 35 | Source: random_forests.py
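A hedged usage sketch of the helper above: it assumes clf_bias_var (and the bias_var aggregation function it calls, which is not shown in the snippet) can be imported from the author's module; the import path, dataset, and classifier are illustrative only.

# Hypothetical driver for clf_bias_var; assumes the function above (and its
# bias_var helper) live in a local module named random_forests.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from random_forests import clf_bias_var  # illustrative import path

X, y = make_classification(n_samples=500, random_state=0)
clf = RandomForestClassifier(n_estimators=50, random_state=0)

auc_scores, error_scores, bias, var = clf_bias_var(clf, X, y, n_replicas=20)
print('mean bootstrap AUC:', np.mean(auc_scores))
print('mean 0-1 error:', np.mean(error_scores))
print('bias:', bias, 'variance:', var)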
Example 4: _make_scorer
def _make_scorer(scoring):
    """Make scorer.

    Parameters
    ----------
    scoring : str | callable
        If str, must be compatible with sklearn's get_scorer.
        If callable, function with signature ``score_func(y, y_pred,
        **kwargs)``.

    Returns
    -------
    scorer : callable | None
        The scorer.
    """
    from sklearn.metrics import make_scorer, get_scorer

    # If scoring is None (default), the predictions are internally
    # generated by estimator.score(). Else, we must first get the
    # predictions based on the scorer.
    if scoring is None:
        return None
    elif isinstance(scoring, str):
        return get_scorer(scoring)
    else:
        return make_scorer(scoring)
Author: Eric89GXL | Project: mne-python | Lines: 25 | Source: base.py
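A hedged usage sketch of the helper above, showing the three branches described in the docstring; the callable passed in is just an arbitrary (y, y_pred) metric from scikit-learn, and any such function would do.

from sklearn.metrics import balanced_accuracy_score

# String input: resolved through sklearn's get_scorer registry.
roc_scorer = _make_scorer('roc_auc')

# Callable with signature score_func(y, y_pred, **kwargs):
# wrapped into a scorer(estimator, X, y) callable via make_scorer.
bal_scorer = _make_scorer(balanced_accuracy_score)

# None (the default) means "fall back to estimator.score() internally".
assert _make_scorer(None) is None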
Example 5: __init__
def __init__(self, estimator, k_features=1,
             forward=True, floating=False,
             verbose=0, scoring=None,
             cv=5, skip_if_stuck=True, n_jobs=1,
             pre_dispatch='2*n_jobs',
             clone_estimator=True):
    self.estimator = estimator
    self.k_features = k_features
    self.forward = forward
    self.floating = floating
    self.pre_dispatch = pre_dispatch
    self.scoring = scoring
    if isinstance(scoring, str):
        self.scorer = get_scorer(scoring)
    else:
        self.scorer = scoring
    self.skip_if_stuck = skip_if_stuck
    self.cv = cv
    self.n_jobs = n_jobs
    self.verbose = verbose
    self.named_est = {key: value for key, value in
                      _name_estimators([self.estimator])}
    self.clone_estimator = clone_estimator
    if self.clone_estimator:
        self.est_ = clone(self.estimator)
    else:
        self.est_ = self.estimator
    self.fitted = False
    self.subsets_ = {}
    self.interrupted_ = False
    # don't mess with this unless testing
    self._TESTING_INTERRUPT_MODE = False
Author: chrinide | Project: mlxtend | Lines: 34 | Source: sequential_feature_selector.py
Example 6: __init__
def __init__(self,
             n_jobs=-1,
             offset_scale=1.0,
             n_buckets=2,
             initial_params=None,
             minimizer='BFGS',
             basinhopping=False,
             scoring='accuracy'):
    from numpy import array
    self.n_jobs = int(n_jobs)
    self.offset_scale = float(offset_scale)
    self.n_buckets = int(n_buckets)
    if initial_params is None:
        # self.initial_offsets_ = [-0.5] * self.n_buckets
        pass
    else:
        self.params = array(initial_params)
        # assert(len(self.initial_offsets_) == self.n_buckets)
        pass
    self.minimizer = minimizer
    self.basinhopping = basinhopping
    from sklearn.metrics import get_scorer
    self.scoring = get_scorer(scoring)
    pass
Author: WojciechMigda | Project: KAGGLE-prudential-life-insurance-assessment | Lines: 26 | Source: OptimizedOffsetRegressor.py
Example 7: __init__
def __init__(self, estimator, min_features=1, max_features=1,
             print_progress=True, scoring='accuracy',
             cv=5, n_jobs=1,
             pre_dispatch='2*n_jobs',
             clone_estimator=True):
    self.estimator = estimator
    self.min_features = min_features
    self.max_features = max_features
    self.pre_dispatch = pre_dispatch
    self.scoring = scoring
    self.scorer = get_scorer(scoring)
    self.cv = cv
    self.print_progress = print_progress
    self.n_jobs = n_jobs
    self.named_est = {key: value for key, value in
                      _name_estimators([self.estimator])}
    self.clone_estimator = clone_estimator
    if self.clone_estimator:
        self.est_ = clone(self.estimator)
    else:
        self.est_ = self.estimator
    self.fitted = False
    self.interrupted_ = False
    # don't mess with this unless testing
    self._TESTING_INTERRUPT_MODE = False
Author: rasbt | Project: mlxtend | Lines: 26 | Source: exhaustive_feature_selector.py
Example 8: __init__
def __init__(self, estimator, k_features,
             forward=True, floating=False,
             print_progress=True, scoring='accuracy',
             cv=5, skip_if_stuck=True, n_jobs=1,
             pre_dispatch='2*n_jobs',
             clone_estimator=True):
    self.estimator = estimator
    self.k_features = k_features
    self.forward = forward
    self.floating = floating
    self.pre_dispatch = pre_dispatch
    self.scoring = scoring
    self.scorer = get_scorer(scoring)
    self.skip_if_stuck = skip_if_stuck
    self.cv = cv
    self.print_progress = print_progress
    self.n_jobs = n_jobs
    self.named_est = {key: value for key, value in
                      _name_estimators([self.estimator])}
    self.clone_estimator = clone_estimator
    if self.clone_estimator:
        self.est_ = clone(self.estimator)
    else:
        self.est_ = self.estimator
    self.fitted = False
Author: varunnathan | Project: mlxtend | Lines: 25 | Source: sequential_feature_selector.py
Example 9: test_rfecv
def test_rfecv():
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)

    # All the noisy variables were filtered out
    assert_array_equal(X_r, iris.data)

    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)

    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test using a scorer
    scorer = get_scorer("accuracy")
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test fix on grid_scores
    def test_scorer(estimator, X, y):
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))

    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
Author: albertotb | Project: scikit-learn | Lines: 59 | Source: test_rfe.py
Example 10: test_classification_scores
def test_classification_scores():
    """Test classification scorers."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)

    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)

    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2,
                         average='weighted')
    assert_almost_equal(score1, score2)

    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)

    # smoke test the repr:
    repr(fbeta_score)
Author: Anuragch | Project: scikit-learn | Lines: 43 | Source: test_score_objects.py
Example 11: test_thresholded_scorers_multilabel_indicator_data
def test_thresholded_scorers_multilabel_indicator_data():
    """Test that the scorers work with the multilabel-indicator format
    for multilabel and multi-output multi-class classifiers.
    """
    X, y = make_multilabel_classification(return_indicator=True,
                                          allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
Author: amitmse | Project: scikit-learn | Lines: 43 | Source: test_score_objects.py
Example 12: test_regression_scorers
def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
Author: AlexanderFabisch | Project: scikit-learn | Lines: 10 | Source: test_score_objects.py
Example 13: test_supervised_cluster_scorers
def test_supervised_cluster_scorers():
    # Test clustering scorers against gold standard labeling.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    for name in CLUSTER_SCORERS:
        score1 = get_scorer(name)(km, X_test, y_test)
        score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
        assert_almost_equal(score1, score2)
Author: chavan-vjti | Project: scikit-learn | Lines: 10 | Source: test_score_objects.py
Example 14: test_unsupervised_scorers
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
Author: AlexanderFabisch | Project: scikit-learn | Lines: 10 | Source: test_score_objects.py
Example 15: __init__
def __init__(self, estimator, k_features=1,
             forward=True, floating=False,
             verbose=0, scoring=None,
             cv=5, n_jobs=1,
             pre_dispatch='2*n_jobs',
             clone_estimator=True):
    self.estimator = estimator
    self.k_features = k_features
    self.forward = forward
    self.floating = floating
    self.pre_dispatch = pre_dispatch

    # Want to raise meaningful error message if a
    # cross-validation generator is inputted
    if isinstance(cv, types.GeneratorType):
        err_msg = ('Input cv is a generator object, which is not '
                   'supported. Instead please input an iterable yielding '
                   'train, test splits. This can usually be done by '
                   'passing a cross-validation generator to the '
                   'built-in list function. I.e. cv=list(<cv-generator>)')
        raise TypeError(err_msg)

    self.cv = cv
    self.n_jobs = n_jobs
    self.verbose = verbose
    self.named_est = {key: value for key, value in
                      _name_estimators([self.estimator])}
    self.clone_estimator = clone_estimator
    if self.clone_estimator:
        self.est_ = clone(self.estimator)
    else:
        self.est_ = self.estimator

    self.scoring = scoring
    if scoring is None:
        if self.est_._estimator_type == 'classifier':
            scoring = 'accuracy'
        elif self.est_._estimator_type == 'regressor':
            scoring = 'r2'
        else:
            raise AttributeError('Estimator must '
                                 'be a Classifier or Regressor.')
    if isinstance(scoring, str):
        self.scorer = get_scorer(scoring)
    else:
        self.scorer = scoring

    self.fitted = False
    self.subsets_ = {}
    self.interrupted_ = False
    # don't mess with this unless testing
    self._TESTING_INTERRUPT_MODE = False
Author: JJLWHarrison | Project: mlxtend | Lines: 53 | Source: sequential_feature_selector.py
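The TypeError above tells callers to materialize generator-based CV splitters before passing them in. A hedged sketch of that advice follows; the class is mlxtend's SequentialFeatureSelector, to which this __init__ belongs, while the exact import path, estimator, and dataset are assumptions for illustration.

from mlxtend.feature_selection import SequentialFeatureSelector  # assumed import path
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
cv_generator = KFold(n_splits=5).split(X, y)   # a generator: would trigger the TypeError above

sfs = SequentialFeatureSelector(KNeighborsClassifier(),
                                k_features=3,
                                scoring='accuracy',
                                cv=list(cv_generator))  # materialize the splits first
sfs = sfs.fit(X, y)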
Example 16: _test_ridge_loo
def _test_ridge_loo(filter_):
    # test that can work with both dense or sparse matrices
    n_samples = X_diabetes.shape[0]

    ret = []

    fit_intercept = filter_ == DENSE_FILTER
    ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept)

    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    alpha_ = ridge_gcv.alpha_
    ret.append(alpha_)

    # check that we get same best alpha with custom loss_func
    f = ignore_warnings
    scoring = make_scorer(mean_squared_error, greater_is_better=False)
    ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
    assert ridge_gcv2.alpha_ == pytest.approx(alpha_)

    # check that we get same best alpha with custom score_func
    func = lambda x, y: -mean_squared_error(x, y)
    scoring = make_scorer(func)
    ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
    assert ridge_gcv3.alpha_ == pytest.approx(alpha_)

    # check that we get same best alpha with a scorer
    scorer = get_scorer('neg_mean_squared_error')
    ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
    ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
    assert ridge_gcv4.alpha_ == pytest.approx(alpha_)

    # check that we get same best alpha with sample weights
    if filter_ == DENSE_FILTER:
        ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                      sample_weight=np.ones(n_samples))
        assert ridge_gcv.alpha_ == pytest.approx(alpha_)

    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T

    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))

    assert_allclose(np.vstack((y_pred, y_pred)).T,
                    Y_pred, rtol=1e-5)

    return ret
Author: manhhomienbienthuy | Project: scikit-learn | Lines: 52 | Source: test_ridge.py
Example 17: score
def score(self, X, y):
    """Score each estimator/data slice couple.

    Parameters
    ----------
    X : array, shape (n_samples, nd_features, n_estimators)
        The input samples. For each data slice, the corresponding estimator
        scores the prediction: e.g. [estimators[ii].score(X[..., ii], y)
        for ii in range(n_estimators)]
        The feature dimension can be multidimensional e.g.
        X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
    y : array, shape (n_samples,) | (n_samples, n_targets)
        The target values.

    Returns
    -------
    score : array, shape (n_samples, n_estimators)
        Score for each estimator / data slice couple.
    """
    from sklearn.metrics import make_scorer, get_scorer
    self._check_Xy(X)
    if X.shape[-1] != len(self.estimators_):
        raise ValueError('The number of estimators does not match '
                         'X.shape[-1]')

    # If scoring is None (default), the predictions are internally
    # generated by estimator.score(). Else, we must first get the
    # predictions based on the scorer.
    if not isinstance(self.scoring, str):
        scoring_ = (make_scorer(self.scoring) if self.scoring is
                    not None else self.scoring)
    elif self.scoring is not None:
        scoring_ = get_scorer(self.scoring)

    # For predictions/transforms the parallelization is across the data and
    # not across the estimators to avoid memory load.
    parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)
    n_jobs = min(n_jobs, X.shape[-1])
    X_splits = np.array_split(X, n_jobs, axis=-1)
    est_splits = np.array_split(self.estimators_, n_jobs)
    score = parallel(p_func(est, scoring_, x, y)
                     for (est, x) in zip(est_splits, X_splits))

    if n_jobs > 1:
        score = np.concatenate(score, axis=0)
    else:
        score = score[0]
    return score
Author: mvdoc | Project: mne-python | Lines: 50 | Source: search_light.py
Example 18: __init__
def __init__(self, estimator, k_features=1,
             forward=True, floating=False,
             verbose=0, scoring=None,
             cv=5, n_jobs=1,
             pre_dispatch='2*n_jobs',
             clone_estimator=True):
    self.estimator = estimator
    self.k_features = k_features
    self.forward = forward
    self.floating = floating
    self.pre_dispatch = pre_dispatch
    self.cv = cv
    self.n_jobs = n_jobs
    self.verbose = verbose
    self.named_est = {key: value for key, value in
                      _name_estimators([self.estimator])}
    self.clone_estimator = clone_estimator
    if self.clone_estimator:
        self.est_ = clone(self.estimator)
    else:
        self.est_ = self.estimator

    self.scoring = scoring
    if scoring is None:
        if self.est_._estimator_type == 'classifier':
            scoring = 'accuracy'
        elif self.est_._estimator_type == 'regressor':
            scoring = 'r2'
        else:
            raise AttributeError('Estimator must '
                                 'be a Classifier or Regressor.')
    if isinstance(scoring, str):
        self.scorer = get_scorer(scoring)
    else:
        self.scorer = scoring

    self.fitted = False
    self.subsets_ = {}
    self.interrupted_ = False
    # don't mess with this unless testing
    self._TESTING_INTERRUPT_MODE = False
Author: NextNight | Project: mlxtend | Lines: 48 | Source: sequential_feature_selector.py
Example 19: __init__
def __init__(self, estimator, k_features,
             forward=True, floating=False,
             print_progress=True, scoring='accuracy',
             cv=5, skip_if_stuck=True, n_jobs=1,
             pre_dispatch='2*n_jobs'):
    self.estimator = clone(estimator)
    self.k_features = k_features
    self.forward = forward
    self.floating = floating
    self.pre_dispatch = pre_dispatch
    self.scoring = scoring
    self.scorer = get_scorer(scoring)
    self.skip_if_stuck = skip_if_stuck
    self.cv = cv
    self.print_progress = print_progress
    self.n_jobs = n_jobs
Author: eduedix | Project: mlxtend | Lines: 16 | Source: sequential_feature_selector.py
Example 20: test_deprecated_names
def test_deprecated_names():
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)

    for name in ('mean_absolute_error', 'mean_squared_error',
                 'median_absolute_error', 'log_loss'):
        warning_msg = "Scoring method %s was renamed to" % name
        for scorer in (get_scorer(name), SCORERS[name]):
            assert_warns_message(DeprecationWarning,
                                 warning_msg,
                                 scorer, clf, X, y)

        assert_warns_message(DeprecationWarning,
                             warning_msg,
                             cross_val_score, clf, X, y, scoring=name)
Author: chavan-vjti | Project: scikit-learn | Lines: 17 | Source: test_score_objects.py
Note: The sklearn.metrics.get_scorer examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors; copyright remains with those authors, and distribution and use are governed by each project's license. Please do not republish without permission.