
Python data.iris_data Function Code Examples


This article collects typical usage examples of the Python function mlxtend.data.iris_data. If you have been wondering how iris_data is used in practice, how to call it, or what working examples look like, the curated code samples below should help.



Below are 20 code examples of the iris_data function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
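Before the examples, here is a minimal sketch of what iris_data itself returns; the variable names and the print call are illustrative only and are not taken from any example below.

from mlxtend.data import iris_data

# Load the classic 150-sample Iris dataset as NumPy arrays:
# X has shape (150, 4) with sepal/petal measurements,
# y has shape (150,) with integer class labels 0, 1, 2.
X, y = iris_data()
print(X.shape, y.shape)  # (150, 4) (150,)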

Example 1: test_scoring

def test_scoring():
    X, y = iris_data()
    clf1 = LogisticRegression(random_state=1)
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    t, p = paired_ttest_5x2cv(estimator1=clf1,
                              estimator2=clf2,
                              X=X, y=y,
                              scoring='accuracy',
                              random_seed=1)

    assert round(t, 3) == -1.539, t
    assert round(p, 3) == 0.184, p

    t, p = paired_ttest_5x2cv(estimator1=clf1,
                              estimator2=clf2,
                              X=X, y=y,
                              scoring='f1_macro',
                              random_seed=1)

    assert round(t, 3) == -1.510, t
    assert round(p, 3) == 0.191, p
Developer ID: JJLWHarrison, Project: mlxtend, Lines of code: 32, Source: test_paired_ttest_5x2cv.py


Example 2: test_scoring

def test_scoring():
    X, y = iris_data()
    clf1 = LogisticRegression(random_state=1,
                              solver='liblinear',
                              multi_class='ovr')
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.5,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.96, round(score1, 2)
    assert round(score2, 2) == 0.91, round(score2, 2)

    t, p = paired_ttest_kfold_cv(estimator1=clf1,
                                 estimator2=clf2,
                                 X=X, y=y,
                                 scoring='accuracy',
                                 random_seed=1)

    assert round(t, 3) == -1.861, t
    assert round(p, 3) == 0.096, p

    t, p = paired_ttest_kfold_cv(estimator1=clf1,
                                 estimator2=clf2,
                                 X=X, y=y,
                                 scoring='recall_micro',
                                 random_seed=1)

    assert round(t, 3) == -1.861, t
    assert round(p, 3) == 0.096, p
Developer ID: rasbt, Project: mlxtend, Lines of code: 34, Source: test_paired_ttest_kfold.py


Example 3: test_iris_data_uci

def test_iris_data_uci():
    tmp = np.genfromtxt(fname=DATA_PATH, delimiter=',')
    original_uci_data_x, original_uci_data_y = tmp[:, :-1], tmp[:, -1]
    original_uci_data_y = original_uci_data_y.astype(int)
    iris_x, iris_y = iris_data()
    assert_array_equal(original_uci_data_x, iris_x)
    assert_array_equal(original_uci_data_y, iris_y)
Developer ID: rasbt, Project: mlxtend, Lines of code: 7, Source: test_iris.py


Example 4: test_not_fitted

def test_not_fitted():
    np.random.seed(123)
    meta = LogisticRegression(multi_class='ovr', solver='liblinear')
    clf1 = RandomForestClassifier(n_estimators=10)
    clf2 = GaussianNB()
    sclf = StackingCVClassifier(classifiers=[clf1, clf2],
                                use_probas=True,
                                meta_classifier=meta, shuffle=False)

    X, y = iris_data()
    assert_raises(NotFittedError,
                  "This StackingCVClassifier instance is not fitted yet."
                  " Call 'fit' with appropriate arguments"
                  " before using this method.",
                  sclf.predict,
                  X)

    assert_raises(NotFittedError,
                  "This StackingCVClassifier instance is not fitted yet."
                  " Call 'fit' with appropriate arguments"
                  " before using this method.",
                  sclf.predict_proba,
                  X)

    assert_raises(NotFittedError,
                  "This StackingCVClassifier instance is not fitted yet."
                  " Call 'fit' with appropriate arguments"
                  " before using this method.",
                  sclf.predict_meta_features,
                  X)
Developer ID: rasbt, Project: mlxtend, Lines of code: 30, Source: test_stacking_cv_classifier.py


Example 5: test_threshold

def test_threshold():

    X, y = iris_data()
    ax, threshold, count = ecdf(x=X[:, 0],
                                x_label='sepal length (cm)',
                                percentile=0.8)
    assert threshold == 6.5
    assert count == 120
Developer ID: JJLWHarrison, Project: mlxtend, Lines of code: 8, Source: test_ecdf.py


Example 6: test_iris_data_r

def test_iris_data_r():
    tmp = np.genfromtxt(fname=DATA_PATH, delimiter=',')
    original_r_data_x, original_r_data_y = tmp[:, :-1], tmp[:, -1]
    original_r_data_y = original_r_data_y.astype(int)
    original_r_data_x[34] = [4.9, 3.1, 1.5, 0.2]
    original_r_data_x[37] = [4.9, 3.6, 1.4, 0.1]
    iris_x, iris_y = iris_data(version='corrected')
    assert_array_equal(original_r_data_x, iris_x)
Developer ID: rasbt, Project: mlxtend, Lines of code: 8, Source: test_iris.py


Example 7: test_verbose

def test_verbose():
    np.random.seed(123)
    meta = LogisticRegression(solver='liblinear',
                              multi_class='ovr')
    clf1 = RandomForestClassifier(n_estimators=10)
    clf2 = GaussianNB()
    sclf = StackingClassifier(classifiers=[clf1, clf2],
                              use_probas=True,
                              meta_classifier=meta,
                              verbose=3)
    X, y = iris_data()
    sclf.fit(X, y)
Developer ID: rasbt, Project: mlxtend, Lines of code: 12, Source: test_stacking_classifier.py


Example 8: test_gridsearch_enumerate_names

def test_gridsearch_enumerate_names():
    np.random.seed(123)
    meta = LogisticRegression(multi_class='ovr', solver='liblinear')
    clf1 = RandomForestClassifier(n_estimators=10)
    clf2 = GaussianNB()
    sclf = StackingCVClassifier(classifiers=[clf1, clf1, clf2],
                                meta_classifier=meta,
                                shuffle=False)

    params = {'meta_classifier__C': [1.0, 100.0],
              'randomforestclassifier-1__n_estimators': [5, 10],
              'randomforestclassifier-2__n_estimators': [5, 20],
              'use_probas': [True, False]}

    grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, iid=False)
    X, y = iris_data()
    grid = grid.fit(X, y)
Developer ID: rasbt, Project: mlxtend, Lines of code: 17, Source: test_stacking_cv_classifier.py


Example 9: test_use_features_in_secondary_predict_proba

def test_use_features_in_secondary_predict_proba():
    np.random.seed(123)
    X, y = iris_data()
    meta = LogisticRegression(solver='liblinear',
                              multi_class='ovr',
                              random_state=1)
    clf1 = RandomForestClassifier(n_estimators=10, random_state=1)
    clf2 = GaussianNB()
    sclf = StackingClassifier(classifiers=[clf1, clf2],
                              use_features_in_secondary=True,
                              meta_classifier=meta)

    sclf.fit(X, y)
    idx = [0, 1, 2]
    y_pred = sclf.predict_proba(X[idx])[:, 0]
    expect = np.array([0.916, 0.828, 0.889])
    np.testing.assert_almost_equal(y_pred, expect, 3)
Developer ID: rasbt, Project: mlxtend, Lines of code: 17, Source: test_stacking_classifier.py


Example 10: test_use_features_in_secondary_sparse_input_predict

def test_use_features_in_secondary_sparse_input_predict():
    np.random.seed(123)
    X, y = iris_data()
    meta = LogisticRegression(solver='liblinear',
                              multi_class='ovr',
                              random_state=1)
    clf1 = RandomForestClassifier(n_estimators=10, random_state=1)
    sclf = StackingClassifier(classifiers=[clf1],
                              use_features_in_secondary=True,
                              meta_classifier=meta)

    scores = cross_val_score(sclf,
                             sparse.csr_matrix(X),
                             y,
                             cv=5,
                             scoring='accuracy')
    scores_mean = (round(scores.mean(), 2))
    assert scores_mean == 0.97, scores_mean
Developer ID: rasbt, Project: mlxtend, Lines of code: 18, Source: test_stacking_classifier.py


Example 11: test_use_features_in_secondary_predict

def test_use_features_in_secondary_predict():
    np.random.seed(123)
    X, y = iris_data()
    meta = LogisticRegression(solver='liblinear',
                              multi_class='ovr')
    clf1 = RandomForestClassifier(n_estimators=10)
    clf2 = GaussianNB()
    sclf = StackingClassifier(classifiers=[clf1, clf2],
                              use_features_in_secondary=True,
                              meta_classifier=meta)

    scores = cross_val_score(sclf,
                             X,
                             y,
                             cv=5,
                             scoring='accuracy')
    scores_mean = (round(scores.mean(), 2))
    assert scores_mean == 0.95, scores_mean
Developer ID: rasbt, Project: mlxtend, Lines of code: 18, Source: test_stacking_classifier.py


Example 12: test_01_loss_tree

def test_01_loss_tree():

    X, y = iris_data()
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.3,
                                                        random_state=123,
                                                        shuffle=True,
                                                        stratify=y)

    tree = DecisionTreeClassifier(random_state=123)
    avg_expected_loss, avg_bias, avg_var = bias_variance_decomp(
            tree, X_train, y_train, X_test, y_test,
            loss='0-1_loss',
            random_seed=123)

    assert round(avg_expected_loss, 3) == 0.062
    assert round(avg_bias, 3) == 0.022
    assert round(avg_var, 3) == 0.040
Developer ID: rasbt, Project: mlxtend, Lines of code: 18, Source: test_bias_variance_decomp.py


Example 13: test_classifier_defaults

def test_classifier_defaults():
    X, y = iris_data()
    clf1 = LogisticRegression(multi_class='ovr',
                              solver='liblinear',
                              random_state=1)
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    t, p = paired_ttest_resampled(estimator1=clf1,
                                  estimator2=clf2,
                                  X=X, y=y,
                                  random_seed=1)

    if Version(sklearn_version) < Version("0.20"):
        assert round(t, 3) == -1.809, t
        assert round(p, 3) == 0.081, p
    else:
        assert round(t, 3) == -1.702, t
        assert round(p, 3) == 0.10, p

    # change max_depth of the decision tree classifier

    clf2 = DecisionTreeClassifier(max_depth=1, random_state=1)

    score3 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score3, 2) == 0.63

    t, p = paired_ttest_resampled(estimator1=clf1,
                                  estimator2=clf2,
                                  X=X, y=y,
                                  random_seed=1)

    assert round(t, 3) == 39.214, t
    assert round(p, 3) == 0.000, p
Developer ID: rasbt, Project: mlxtend, Lines of code: 44, Source: test_paired_ttest_resampled.py


Example 14: test_train_size

def test_train_size():
    X, y = iris_data()
    clf1 = LogisticRegression(solver='liblinear', multi_class='ovr')
    clf2 = DecisionTreeClassifier()

    expected_err_msg = ("train_size must be of type int or float. "
                        "Got <class 'NoneType'>.")

    if sys.version_info < (3, 0):
        expected_err_msg = expected_err_msg.replace('<class', '<type')

    assert_raises(ValueError,
                  expected_err_msg,
                  paired_ttest_resampled,
                  clf1,
                  clf2,
                  X,
                  y,
                  test_size=None)
Developer ID: rasbt, Project: mlxtend, Lines of code: 19, Source: test_paired_ttest_resampled.py


Example 15: test_scoring

def test_scoring():
    X, y = iris_data()
    clf1 = LogisticRegression(multi_class='ovr',
                              solver='liblinear',
                              random_state=1)
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    t, p = paired_ttest_resampled(estimator1=clf1,
                                  estimator2=clf2,
                                  X=X, y=y,
                                  scoring='accuracy',
                                  random_seed=1)

    if Version(sklearn_version) < Version('0.20'):
        assert round(t, 3) == -1.809, t
        assert round(p, 3) == 0.081, p
    else:
        assert round(t, 3) == -1.702, t
        assert round(p, 3) == 0.1, p

    t, p = paired_ttest_resampled(estimator1=clf1,
                                  estimator2=clf2,
                                  X=X, y=y,
                                  scoring='f1_macro',
                                  random_seed=1)

    if Version(sklearn_version) < Version("0.20"):
        assert round(t, 3) == -1.690, t
        assert round(p, 3) == 0.102, p
    else:
        assert round(t, 3) == -1.561, t
        assert round(p, 3) == 0.129, p
Developer ID: rasbt, Project: mlxtend, Lines of code: 42, Source: test_paired_ttest_resampled.py


Example 16: test_gridsearch

def test_gridsearch():
    np.random.seed(123)
    meta = LogisticRegression(solver='liblinear',
                              multi_class='ovr')
    clf1 = RandomForestClassifier(n_estimators=10)
    clf2 = GaussianNB()
    sclf = StackingClassifier(classifiers=[clf1, clf2],
                              meta_classifier=meta)

    params = {'meta_classifier__C': [1.0, 100.0],
              'randomforestclassifier__n_estimators': [20, 200]}

    grid = GridSearchCV(estimator=sclf, param_grid=params, cv=5, iid=False)
    X, y = iris_data()
    grid.fit(X, y)

    mean_scores = [round(s, 2) for s
                   in grid.cv_results_['mean_test_score']]

    assert mean_scores == [0.95, 0.97, 0.96, 0.96], mean_scores
Developer ID: rasbt, Project: mlxtend, Lines of code: 20, Source: test_stacking_classifier.py


Example 17: test_classifier_defaults

def test_classifier_defaults():
    X, y = iris_data()
    clf1 = LogisticRegression(random_state=1,
                              multi_class='ovr',
                              solver='liblinear')
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    t, p = paired_ttest_kfold_cv(estimator1=clf1,
                                 estimator2=clf2,
                                 X=X, y=y,
                                 random_seed=1)

    assert round(t, 3) == -1.861, t
    assert round(p, 3) == 0.096, p

    # change max_depth of the decision tree classifier

    clf2 = DecisionTreeClassifier(max_depth=1, random_state=1)

    score3 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score3, 2) == 0.63

    t, p = paired_ttest_kfold_cv(estimator1=clf1,
                                 estimator2=clf2,
                                 X=X, y=y,
                                 random_seed=1)

    assert round(t, 3) == 13.491, t
    assert round(p, 3) == 0.000, p
Developer ID: rasbt, Project: mlxtend, Lines of code: 40, Source: test_paired_ttest_kfold.py


Example 18: test_classifier_defaults

def test_classifier_defaults():
    X, y = iris_data()
    clf1 = LogisticRegression(random_state=1,
                              multi_class='ovr',
                              solver='liblinear')
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    f, p = combined_ftest_5x2cv(estimator1=clf1,
                                estimator2=clf2,
                                X=X, y=y,
                                random_seed=1)

    assert round(f, 3) == 1.053, f
    assert round(p, 3) == 0.509, p

    # change max_depth of the decision tree classifier

    clf2 = DecisionTreeClassifier(max_depth=1, random_state=1)

    score3 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score3, 2) == 0.63

    f, p = combined_ftest_5x2cv(estimator1=clf1,
                                estimator2=clf2,
                                X=X, y=y,
                                random_seed=1)

    assert round(f, 3) == 34.934, f
    assert round(p, 3) == 0.001, p
Developer ID: rasbt, Project: mlxtend, Lines of code: 40, Source: test_combined_ftest_5x2cv.py


Example 19: test_classifier_defaults

def test_classifier_defaults():
    X, y = iris_data()
    clf1 = LogisticRegression(random_state=1)
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    t, p = paired_ttest_5x2cv(estimator1=clf1,
                              estimator2=clf2,
                              X=X, y=y,
                              random_seed=1)

    assert round(t, 3) == -1.539, t
    assert round(p, 3) == 0.184, p

    # change max_depth of the decision tree classifier

    clf2 = DecisionTreeClassifier(max_depth=1, random_state=1)

    score3 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score3, 2) == 0.63

    t, p = paired_ttest_5x2cv(estimator1=clf1,
                              estimator2=clf2,
                              X=X, y=y,
                              random_seed=1)

    assert round(t, 3) == 5.386, t
    assert round(p, 3) == 0.003, p
Developer ID: JJLWHarrison, Project: mlxtend, Lines of code: 38, Source: test_paired_ttest_5x2cv.py


Example 20: test_scoring

def test_scoring():
    X, y = iris_data()
    clf1 = LogisticRegression(random_state=1, solver='liblinear',
                              multi_class='ovr')
    clf2 = DecisionTreeClassifier(random_state=1)

    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.25,
                         random_state=123)

    score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
    score2 = clf2.fit(X_train, y_train).score(X_test, y_test)

    assert round(score1, 2) == 0.97
    assert round(score2, 2) == 0.95

    f, p = combined_ftest_5x2cv(estimator1=clf1,
                                estimator2=clf2,
                                X=X, y=y,
                                scoring='accuracy',
                                random_seed=1)

    assert round(f, 3) == 1.053, f
    assert round(p, 3) == 0.509, p

    f, p = combined_ftest_5x2cv(estimator1=clf1,
                                estimator2=clf2,
                                X=X, y=y,
                                scoring='f1_macro',
                                random_seed=1)

    if Version(sklearn_version) < Version('0.20'):
        assert round(f, 3) == -1.510, f
        assert round(p, 3) == 0.191, p
    else:
        assert round(f, 3) == 1.046, f
        assert round(p, 3) == 0.513, p
Developer ID: rasbt, Project: mlxtend, Lines of code: 37, Source: test_combined_ftest_5x2cv.py



Note: The mlxtend.data.iris_data examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce without permission.

