This article collects typical usage examples of the Python function sklearn.utils.multiclass.unique_labels. If you have been wondering what unique_labels does, how to call it, or where to find working examples, the curated code samples below may help. In short, unique_labels takes one or more arrays of target labels and returns an ordered array of their unique labels, raising ValueError when the inputs mix incompatible label formats (for example multilabel with multiclass targets, or string with numeric labels).
Twenty code examples of unique_labels are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
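Before the collected examples, a quick sketch of the function's basic behavior; the calls and outputs below follow the scikit-learn docstring:

>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1,  2,  5, 10, 11])

When several target arrays are passed, the result is the union of their labels, as the third call shows.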
Example 1: test_unique_labels_mixed_types
def test_unique_labels_mixed_types():
    # Mix of multilabel-indicator and multilabel-sequences
    mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
                                    EXAMPLES["multilabel-sequences"])
    for y_multilabel, y_multiclass in mix_multilabel_format:
        assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
        assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)

    # Mix with binary or multiclass and multilabel
    mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
                             EXAMPLES["multilabel-sequences"],
                             EXAMPLES["multiclass"] +
                             EXAMPLES["binary"])
    for y_multilabel, y_multiclass in mix_clf_format:
        assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
        assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)

    # Mix string and number input type
    assert_raises(ValueError, unique_labels, [[1, 2], [3]], [["a", "d"]])
    assert_raises(ValueError, unique_labels, ["1", 2])
    assert_raises(ValueError, unique_labels, [["1", 2], [3]])
    assert_raises(ValueError, unique_labels, [["1", "2"], [3]])

    assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
    assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
                       ["0", "2"])
Author: 1TTT9, Project: scikit-learn, Lines: 28, Source: test_multiclass.py
Example 2: test_unique_labels
def test_unique_labels():
    # Empty iterable
    assert_raises(ValueError, unique_labels)

    # Multiclass problem
    assert_array_equal(unique_labels(xrange(10)), np.arange(10))
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))

    # Multilabels
    assert_array_equal(
        assert_warns(DeprecationWarning, unique_labels,
                     [(0, 1, 2), (0,), tuple(), (2, 1)]),
        np.arange(3))
    assert_array_equal(
        assert_warns(DeprecationWarning, unique_labels,
                     [[0, 1, 2], [0], list(), [2, 1]]),
        np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [1, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))

    # Several arrays passed
    assert_array_equal(unique_labels([4, 0, 2], xrange(5)), np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)), np.arange(3))

    # Border line case with binary indicator matrix
    assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
    assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
    assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
                       np.arange(5))

    # Some tests with strings input
    assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
                       ["a", "b", "c", "d"])
    assert_array_equal(
        assert_warns(DeprecationWarning, unique_labels,
                     [["a", "b"], ["c"]], [["d"]]),
        ["a", "b", "c", "d"])
Author: 93sam, Project: scikit-learn, Lines: 34, Source: test_multiclass.py
Example 3: test_unique_labels
def test_unique_labels():
    # Empty iterable
    assert_raises(ValueError, unique_labels)

    # Multiclass problem
    assert_array_equal(unique_labels(range(10)), np.arange(10))
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))

    # Multilabel indicator
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [1, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))

    # Several arrays passed
    assert_array_equal(unique_labels([4, 0, 2], range(5)),
                       np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
                       np.arange(3))

    # Border line case with binary indicator matrix
    assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
    assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
    assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
                       np.arange(5))
Author: hmshan, Project: scikit-learn, Lines: 30, Source: test_multiclass.py
Example 4: test_unique_labels_non_specific
def test_unique_labels_non_specific():
    """Test unique_labels with a variety of collected examples"""
    # Smoke test for all supported format
    for format in ["binary", "multiclass", "multilabel-sequences",
                   "multilabel-indicator"]:
        for y in EXAMPLES[format]:
            unique_labels(y)

    # We don't support those format at the moment
    for example in NON_ARRAY_LIKE_EXAMPLES:
        assert_raises(ValueError, unique_labels, example)

    for y_type in ["unknown", "continuous", "continuous-multioutput",
                   "multiclass-multioutput"]:
        for example in EXAMPLES[y_type]:
            assert_raises(ValueError, unique_labels, example)
Author: 93sam, Project: scikit-learn, Lines: 15, Source: test_multiclass.py
Example 5: run_intent_evaluation
def run_intent_evaluation(config, model_path, component_builder=None):
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import classification_report
    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import f1_score
    from sklearn.metrics import precision_score
    from sklearn.utils.multiclass import unique_labels

    # get the metadata config from the package data
    test_data = load_data(config['data'])
    metadata = Metadata.load(model_path)
    interpreter = Interpreter.load(metadata, config, component_builder)

    test_y = [e.get("intent") for e in test_data.training_examples]

    preds = []
    for e in test_data.training_examples:
        res = interpreter.parse(e.text)
        if res.get('intent'):
            preds.append(res['intent'].get('name'))
        else:
            preds.append(None)

    logger.info("Intent Evaluation Results")
    logger.info("F1-Score: {}".format(
        f1_score(test_y, preds, average='weighted')))
    logger.info("Precision: {}".format(
        precision_score(test_y, preds, average='weighted')))
    logger.info("Accuracy: {}".format(accuracy_score(test_y, preds)))
    logger.info("Classification report: \n{}".format(
        classification_report(test_y, preds)))

    cnf_matrix = confusion_matrix(test_y, preds)
    plot_intent_confusion_matrix(cnf_matrix,
                                 classes=unique_labels(test_y, preds),
                                 title='Intent Confusion matrix')
    plt.show()
    return
Author: maruyue, Project: rasa_nlu, Lines: 35, Source: evaluate.py
Example 6: validate
def validate(self, model, test_x, test_y):
    pred_test_y = model.predict(test_x)
    cr = classification_report(test_y, pred_test_y, output_dict=True)
    cm = confusion_matrix(test_y, pred_test_y)

    validation_metrics = OrderedDict()
    for metric in self.metrics:
        v = metric(test_y, pred_test_y)
        validation_metrics[metric.name] = v

    labs = unique_labels(test_y)

    report = "\n"
    report += "\tClassificationReport for `{}`\n".format(model.name)
    report += "\n"
    report += pretty_df("Report", pd.DataFrame(cr).transpose())
    report += "\n\n"
    report += pretty_table(
        "Metric results",
        list(validation_metrics.values()),
        ["Metric"],
        validation_metrics.keys()
    )
    report += "\n\n"
    report += pretty_table("Confusion matrix", cm, labs, labs)

    logger.info("VClassificationReport: \n{}".format(report))
    return ModelInstance.apply_config(
        model,
        validation_metrics=validation_metrics
    )
Author: alexeyche, Project: alexeyche-junk, Lines: 33, Source: validation.py
Example 7: confusion_matrix_instances
def confusion_matrix_instances(y_true, y_pred, labels=None):
    y_type, y_true, y_pred = _check_clf_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))
    # convert yt, yp into index
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]

    CM = np.zeros((n_labels, n_labels, y_true.shape[0]), dtype=np.bool)
    CM[y_true, y_pred, np.arange(y_true.shape[0])] = True
    return CM
Author: EdwardBetts, Project: twitter-sentiment, Lines: 26, Source: evaluation.py
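Summing Example 7's per-instance tensor over its last axis recovers the ordinary confusion matrix. A minimal usage sketch, assuming the module's own imports are available (numpy as np, unique_labels, and _check_clf_targets, a private helper from older scikit-learn releases where np.bool was still valid):

y_true = ["cat", "dog", "cat", "bird"]
y_pred = ["cat", "cat", "cat", "bird"]

CM = confusion_matrix_instances(y_true, y_pred)
print(CM.shape)        # (n_labels, n_labels, n_samples) boolean tensor
print(CM.sum(axis=2))  # collapses to the usual confusion matrix counts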
Example 8: fit
def fit(self, X, y):
    if self.activation is None:
        # Useful to quantify the impact of the non-linearity
        self._activate = lambda x: x
    else:
        self._activate = self.activations[self.activation]
    rng = check_random_state(self.random_state)

    # one-of-K coding for output values
    self.classes_ = unique_labels(y)
    Y = label_binarize(y, self.classes_)

    # set hidden layer parameters randomly
    n_features = X.shape[1]
    if self.rank is None:
        if self.density == 1:
            self.weights_ = rng.randn(n_features, self.n_hidden)
        else:
            self.weights_ = sparse_random_matrix(
                self.n_hidden, n_features, density=self.density,
                random_state=rng).T
    else:
        # Low rank weight matrix
        self.weights_u_ = rng.randn(n_features, self.rank)
        self.weights_v_ = rng.randn(self.rank, self.n_hidden)
    self.biases_ = rng.randn(self.n_hidden)

    # map the input data through the hidden layer
    H = self.transform(X)

    # fit the linear model on the hidden layer activation
    self.beta_ = np.dot(pinv2(H), Y)
    return self
Author: ddofer, Project: Kaggle-HUJI-ML, Lines: 33, Source: ELM.py
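Example 8 is the classic extreme-learning-machine recipe: the hidden layer is random and fixed, so training reduces to a single least-squares solve, beta_ = pinv(H) @ Y. Prediction would map new data through the same hidden layer and take the argmax of the linear readout; the repo's actual predict method is not shown here, so the following is only a hypothetical sketch (the name predict_sketch is made up):

def predict_sketch(elm, X):
    # Map through the fixed random hidden layer, apply the linear
    # readout fitted in fit(), and decode the one-of-K scores.
    H = elm.transform(X)
    scores = np.dot(H, elm.beta_)
    return elm.classes_[np.argmax(scores, axis=1)]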
Example 9: run_intent_evaluation
def run_intent_evaluation(config, model_path, component_builder=None):
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels

    # get the metadata config from the package data
    test_data = load_data(config['data'])
    interpreter = Interpreter.load(model_path, config, component_builder)

    test_y = [e.get("intent") for e in test_data.training_examples]

    preds = []
    for e in test_data.training_examples:
        res = interpreter.parse(e.text)
        if res.get('intent'):
            preds.append(res['intent'].get('name'))
        else:
            preds.append(None)

    log_evaluation_table(test_y, preds)

    cnf_matrix = confusion_matrix(test_y, preds)
    plot_confusion_matrix(cnf_matrix, classes=unique_labels(test_y, preds),
                          title='Intent Confusion matrix')
    plt.show()
    return
Author: DominicBreuker, Project: rasa_nlu, Lines: 26, Source: evaluate.py
Example 10: test_losses
def test_losses():
    """Test loss functions"""
    y_true, y_pred, _ = make_prediction(binary=True)
    n_samples = y_true.shape[0]
    n_classes = np.size(unique_labels(y_true))

    # Classification
    # --------------
    with warnings.catch_warnings(True):
        # Throw deprecated warning
        assert_equal(zero_one(y_true, y_pred), 13)
        assert_almost_equal(zero_one(y_true, y_pred, normalize=True),
                            13 / float(n_samples), 2)

    assert_almost_equal(zero_one_loss(y_true, y_pred),
                        13 / float(n_samples), 2)
    assert_equal(zero_one_loss(y_true, y_pred, normalize=False), 13)
    assert_almost_equal(zero_one_loss(y_true, y_true), 0.0, 2)
    assert_almost_equal(zero_one_loss(y_true, y_true, normalize=False), 0, 2)

    assert_almost_equal(hamming_loss(y_true, y_pred),
                        2 * 13. / (n_samples * n_classes), 2)

    assert_equal(accuracy_score(y_true, y_pred),
                 1 - zero_one_loss(y_true, y_pred))
    assert_equal(accuracy_score(y_true, y_pred, normalize=False),
                 n_samples - zero_one_loss(y_true, y_pred, normalize=False))

    with warnings.catch_warnings(True):
        # Throw deprecated warning
        assert_equal(zero_one_score(y_true, y_pred),
                     1 - zero_one_loss(y_true, y_pred))

    # Regression
    # ----------
    assert_almost_equal(mean_squared_error(y_true, y_pred),
                        12.999 / n_samples, 2)
    assert_almost_equal(mean_squared_error(y_true, y_true), 0.00, 2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    assert_almost_equal(mean_absolute_error(y_true, y_pred),
                        12.999 / n_samples, 2)
    assert_almost_equal(mean_absolute_error(y_true, y_true), 0.00, 2)

    assert_almost_equal(explained_variance_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(explained_variance_score(y_true, y_true), 1.00, 2)
    assert_equal(explained_variance_score([0, 0, 0], [0, 1, 1]), 0.0)

    assert_almost_equal(r2_score(y_true, y_pred), -0.04, 2)
    assert_almost_equal(r2_score(y_true, y_true), 1.00, 2)
    assert_equal(r2_score([0, 0, 0], [0, 0, 0]), 1.0)
    assert_equal(r2_score([0, 0, 0], [0, 1, 1]), 0.0)
Author: dannymulligan, Project: scikit-learn, Lines: 55, Source: test_metrics.py
Example 11: fit
def fit(self, X, y):
    # Check data
    X, y = np.array(X), np.array(y)
    X, y = check_X_y(X, y)

    # Split to grow cascade and validate
    mask = np.random.random(y.shape[0]) < self.validation_fraction
    X_tr, X_vl = X[mask], X[~mask]
    y_tr, y_vl = y[mask], y[~mask]

    self.classes_ = unique_labels(y)
    self.layers_, inp_tr, inp_vl = [], X_tr, X_vl
    self.scores_ = []

    # First layer
    forests = [
        RandomForestClassifier(max_features=1,  # Complete random
                               n_estimators=self.n_estimators,
                               min_samples_split=10, criterion='gini',
                               n_jobs=-1),
        RandomForestClassifier(max_features=1,  # Complete random
                               n_estimators=self.n_estimators,
                               min_samples_split=10, criterion='gini',
                               n_jobs=-1),
        RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1),
        RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1)
    ]
    _ = [f.fit(inp_tr, y_tr) for f in forests]
    p_vl = [f.predict_proba(inp_vl) for f in forests]
    labels = [self.classes_[i]
              for i in np.argmax(np.array(p_vl).mean(axis=0), axis=1)]
    score = self.scoring(y_vl, labels)
    self.layers_.append(forests)
    self.scores_.append(score)
    p_tr = [cross_val_predict(f, inp_tr, y_tr, cv=self.cv,
                              method='predict_proba') for f in forests]

    # Fit other layers
    last_score = score
    inp_tr = np.concatenate([X_tr] + p_tr, axis=1)
    inp_vl = np.concatenate([X_vl] + p_vl, axis=1)
    while True:  # Grow cascade
        forests = [
            RandomForestClassifier(max_features=1,  # Complete random
                                   n_estimators=self.n_estimators,
                                   min_samples_split=10, criterion='gini',
                                   n_jobs=-1),
            RandomForestClassifier(max_features=1,  # Complete random
                                   n_estimators=self.n_estimators,
                                   min_samples_split=10, criterion='gini',
                                   n_jobs=-1),
            RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1),
            RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1)
        ]
        _ = [forest.fit(inp_tr, y_tr) for forest in forests]  # Fit the forest
        p_vl = [forest.predict_proba(inp_vl) for forest in forests]
        labels = [self.classes_[i]
                  for i in np.argmax(np.array(p_vl).mean(axis=0), axis=1)]
        score = self.scoring(y_vl, labels)

        if score - last_score > self.tolerance:
            self.layers_.append(forests)
            p_tr = [cross_val_predict(f, inp_tr, y_tr, cv=self.cv,
                                      method='predict_proba') for f in forests]
            inp_tr = np.concatenate([X_tr] + p_tr, axis=1)
            inp_vl = np.concatenate([X_vl] + p_vl, axis=1)
            self.scores_.append(score)
            last_score = score
            print(self.scores_)
        else:
            break

    # Retrain on entire dataset
    inp_ = X
    for forests in self.layers_:
        _ = [f.fit(inp_, y) for f in forests]
        p = [cross_val_predict(f, inp_, y, cv=self.cv,
                               method='predict_proba') for f in forests]
        inp_ = np.concatenate([X] + p, axis=1)
    return self
Author: sig-ml, Project: bleedml, Lines: 55, Source: classifiers.py
Example 12: fit
def fit(self, X, y):
    X, y = check_X_y(X, y)
    self.classes_ = unique_labels(y)
    self.X_ = DynamicBayesianClassifier._first_col(X)
    self.y_ = y
    self.size_ = self.X_.size
    for i in range(self.X_.size):
        if y[i] not in self.dbayesmode_major_.keys():
            self.dbayesmode_major_[y[i]] = scalgoutil.DBayesMode(y[i])
        self.dbayesmode_major_[y[i]].update(self.X_[i])
        self.update_priors()
    return self
Author: KeyboardNerd, Project: PredictiveServer, Lines: 12, Source: scestimator.py
Example 13: plot_confusion_matrix
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
Author: allefpablo, Project: scikit-learn, Lines: 52, Source: plot_confusion_matrix.py
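A minimal usage sketch for Example 13's function; the labels below are made up for illustration. Because the function indexes classes with unique_labels(y_true, y_pred), classes must be an integer-indexable array such as a NumPy array of class names:

import numpy as np
import matplotlib.pyplot as plt

y_true = np.array([0, 1, 2, 2, 1, 0])
y_pred = np.array([0, 2, 2, 2, 1, 0])
class_names = np.array(["setosa", "versicolor", "virginica"])

# Plot a row-normalized confusion matrix using only the labels
# that actually occur in y_true / y_pred.
plot_confusion_matrix(y_true, y_pred, classes=class_names, normalize=True)
plt.show()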
Example 14: score
def score(self, X, y, **kwargs):
    """
    Generates a 2D array where each row is the count of the
    predicted classes and each column is the true class

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    Returns
    -------
    score_ : float
        Global accuracy score
    """
    # We're relying on predict to raise NotFitted
    y_pred = self.predict(X)
    y_type, y_true, y_pred = _check_targets(y, y_pred)

    if y_type not in ("binary", "multiclass"):
        raise YellowbrickValueError("%s is not supported" % y_type)

    indices = unique_labels(y_true, y_pred)
    if len(self.classes_) > len(indices):
        raise ModelError("y and y_pred contain zero values "
                         "for one of the specified classes")
    elif len(self.classes_) < len(indices):
        raise NotImplementedError("filtering classes is "
                                  "currently not supported")

    # Create a table of predictions whose rows are the true classes
    # and whose columns are the predicted classes; each element
    # is the count of predictions for that class that match the true
    # value of that class.
    self.predictions_ = np.array([
        [
            (y_pred[y == label_t] == label_p).sum()
            for label_p in indices
        ]
        for label_t in indices
    ])

    self.draw()
    self.score_ = self.estimator.score(X, y)
    return self.score_
Author: DistrictDataLabs, Project: yellowbrick, Lines: 52, Source: class_prediction_error.py
Example 15: plot_story_evaluation
def plot_story_evaluation(test_y, preds, out_file):
    """Plot the results of story evaluation"""
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels
    import matplotlib.pyplot as plt

    log_evaluation_table(test_y, preds)
    cnf_matrix = confusion_matrix(test_y, preds)
    plot_confusion_matrix(cnf_matrix, classes=unique_labels(test_y, preds),
                          title='Action Confusion matrix')

    fig = plt.gcf()
    fig.set_size_inches(int(20), int(20))
    fig.savefig(out_file, bbox_inches='tight')
Author: githubclj, Project: rasa_core, Lines: 14, Source: evaluate.py
Example 16: evaluate
def evaluate(
    X_train,
    X_test,
    y_train,
    y_test,
    templates_env,
    store_metadata,
    n_folds,
    n_jobs,
    paper,
    pool,
):
    pipeline = Pipeline(
        [
            ('svd', TruncatedSVD(n_components=50)),
            ('nn', KNeighborsClassifier()),
        ]
    )

    logger.info('Training.')
    pipeline.fit(X_train, y_train)

    logger.info('Predicting %d labels.', X_test.shape[0])
    y_predicted = pipeline.predict(X_test)

    prfs = precision_recall_fscore_support(y_test, y_predicted)

    util.display(
        templates_env.get_template('classification_report.rst').render(
            argv=' '.join(sys.argv) if not util.inside_ipython() else 'ipython',
            paper=paper,
            clf=pipeline,
            tprfs=zip(unique_labels(y_test, y_predicted), *prfs),
            p_avg=np.average(prfs[0], weights=prfs[3]),
            r_avg=np.average(prfs[1], weights=prfs[3]),
            f_avg=np.average(prfs[2], weights=prfs[3]),
            s_sum=np.sum(prfs[3]),
            store_metadata=store_metadata,
            accuracy=accuracy_score(y_test, y_predicted),
        )
    )

    pd.DataFrame(y_predicted).to_csv('out.csv')
    pd.DataFrame(y_test).to_csv('y_test.csv')
Author: dimazest, Project: fowler.corpora, Lines: 44, Source: main.py
Example 17: do_full_svm
def do_full_svm(self):
    self.accuracies = [0] * self.iterations
    this_round = []
    for i in range(self.iterations):
        self.clf = LinearSVC()
        self.clf.fit(self.x_train_arr[i], self.y_train_arr[i])
        svm_prediction = self.clf.predict(self.x_test_arr[i])
        this_round.append(metrics.precision_recall_fscore_support(
            y_true=self.y_test_arr[i], y_pred=svm_prediction))
        self.accuracies[i] = metrics.accuracy_score(self.y_test_arr[i],
                                                    svm_prediction)
        self.labels = unique_labels(self.y_test_arr[i], svm_prediction)
        print ".",
    self.performance = np.mean(this_round, axis=0)
    print ""
Author: jenningsanderson, Project: ner-twitter-ml, Lines: 20, Source: learner.py
Example 18: evaluate_intents
def evaluate_intents(intent_results,
                     errors_filename,
                     confmat_filename,
                     intent_hist_filename,
                     ):  # pragma: no cover
    """Creates a confusion matrix and summary statistics for intent predictions.

    Log samples which could not be classified correctly and save them to file.
    Creates a confidence histogram which is saved to file.
    Only considers those examples with a set intent.
    Others are filtered out."""
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels
    import matplotlib.pyplot as plt

    # remove empty intent targets
    num_examples = len(intent_results)
    intent_results = remove_empty_intent_examples(intent_results)

    logger.info("Intent Evaluation: Only considering those "
                "{} examples that have a defined intent out "
                "of {} examples".format(len(intent_results), num_examples))

    targets, predictions = _targets_predictions_from(intent_results)
    log_evaluation_table(targets, predictions)

    # log and save misclassified samples to file for debugging
    errors = collect_nlu_errors(intent_results)
    if errors:
        save_nlu_errors(errors, errors_filename)

    cnf_matrix = confusion_matrix(targets, predictions)
    labels = unique_labels(targets, predictions)
    plot_confusion_matrix(cnf_matrix, classes=labels,
                          title='Intent Confusion matrix',
                          out=confmat_filename)
    plt.show()

    plot_intent_confidences(intent_results, intent_hist_filename)
    plt.show()
Author: shiva16, Project: rasa_nlu, Lines: 41, Source: evaluate.py
Example 19: evaluate_intents
def evaluate_intents(targets, predictions):  # pragma: no cover
    """Creates a confusion matrix and summary statistics for intent predictions.

    Only considers those examples with a set intent. Others are filtered out.
    """
    from sklearn.metrics import confusion_matrix
    from sklearn.utils.multiclass import unique_labels
    import matplotlib.pyplot as plt

    # remove empty intent targets
    num_examples = len(targets)
    targets, predictions = remove_empty_intent_examples(targets, predictions)
    logger.info("Intent Evaluation: Only considering those {} examples that "
                "have a defined intent out of {} examples".format(
                    targets.size, num_examples))
    log_evaluation_table(targets, predictions)

    cnf_matrix = confusion_matrix(targets, predictions)
    plot_confusion_matrix(cnf_matrix,
                          classes=unique_labels(targets, predictions),
                          title='Intent Confusion matrix')
    plt.show()
Author: codealphago, Project: rasa_nlu, Lines: 21, Source: evaluate.py
Example 20: test_unique_labels
def test_unique_labels():
    # Empty iterable
    assert_raises(ValueError, unique_labels)

    # Multiclass problem
    assert_array_equal(unique_labels(xrange(10)), np.arange(10))
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))

    # Multilabels
    assert_array_equal(unique_labels([(0, 1, 2), (0,), tuple(), (2, 1)]),
                       np.arange(3))
    assert_array_equal(unique_labels([[0, 1, 2], [0], list(), [2, 1]]),
                       np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [1, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))

    # Several arrays passed
    assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
                       np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
                       np.arange(3))
Author: alfa07, Project: scikit-learn, Lines: 24, Source: test_multiclass.py
Note: the sklearn.utils.multiclass.unique_labels examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's License. Do not repost without permission.