Python datasets.load_iris Function Code Examples


This article compiles typical usage examples of the sklearn.datasets.load_iris function in Python. If you have been wondering what load_iris does, how to call it, and what real-world usage looks like, the curated code examples below should help.

The following sections show 20 code examples of the load_iris function, sorted by popularity by default.
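
Before diving into the examples, here is a minimal sketch of typical load_iris usage (assuming a reasonably recent scikit-learn install; the variable names are illustrative, and the return_X_y option at the end exists only in newer releases, as Example 6 below also demonstrates):

from sklearn import datasets

iris = datasets.load_iris()        # Bunch-like object holding the data plus metadata
X = iris.data                      # feature matrix of shape (150, 4)
y = iris.target                    # class labels of shape (150,), values 0, 1, 2
print(iris.target_names)           # ['setosa' 'versicolor' 'virginica']
print(iris.feature_names)          # sepal/petal length and width column names

# Newer scikit-learn versions can also return a plain (X, y) tuple directly
X, y = datasets.load_iris(return_X_y=True)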

Example 1: test_LabelBinarizer2

    def test_LabelBinarizer2(self):
        arr = np.array(['X', 'Y', 'Z', 'X'])
        s = pdml.ModelSeries(arr)

        lb = s.preprocessing.LabelBinarizer()
        s.fit(lb)

        binarized = s.transform(lb)
        self.assertTrue(isinstance(binarized, pdml.ModelFrame))

        expected = pd.DataFrame({0: [1, 0, 0, 1], 1: [0, 1, 0, 0], 2: [0, 0, 1, 0]})
        self.assert_frame_equal(binarized, expected)

        df = pdml.ModelFrame(datasets.load_iris())
        df.target.fit(lb)
        binarized = df.target.transform(lb)

        expected = pd.DataFrame({0: [1] * 50 + [0] * 100,
                                 1: [0] * 50 + [1] * 50 + [0] * 50,
                                 2: [0] * 100 + [1] * 50})
        self.assert_frame_equal(binarized, expected)

        df = pdml.ModelFrame(datasets.load_iris())
        df.target.fit(lb)
        df.target = df.target.transform(lb)
        self.assertEqual(df.shape, (150, 7))
        self.assert_frame_equal(df.target, expected)
Developer: Sandy4321 | Project: pandas-ml | Lines: 27 | Source: test_preprocessing.py


Example 2: main

def main():
    all_targets = load_iris()['target']
    data_set = load_iris()['data']

    train_set, test_set, targets, targets_test = train_test_split(data_set, all_targets, train_size=0.9)

    targets_class = (transform_target_vars(targets, class_num=0),
                     transform_target_vars(targets, class_num=1),
                     transform_target_vars(targets, class_num=2))

    for n_trees in range(1, 150, 10):
        classifiers = (GradientBoostingClassifier(n_trees=n_trees, max_tree_depth=1, n_features=3),
                       GradientBoostingClassifier(n_trees=n_trees, max_tree_depth=1, n_features=3),
                       GradientBoostingClassifier(n_trees=n_trees, max_tree_depth=1, n_features=3))

        classifiers[0].fit(train_set, targets_class[0])
        classifiers[1].fit(train_set, targets_class[1])
        classifiers[2].fit(train_set, targets_class[2])

        predicts = (classifiers[0].predict(test_set),
                    classifiers[1].predict(test_set),
                    classifiers[2].predict(test_set))

        fin_predict = decision_function(predicts[0], predicts[1], predicts[2])

        print "Number of trees:", n_trees, ":", accuracy_score(targets_test, fin_predict)
Developer: antongoy | Project: sfera_dm | Lines: 26 | Source: gradient_boosting.py


Example 3: setUp

    def setUp(self):
        self.x = datasets.load_iris().data
        self.y = datasets.load_iris().target
        # test without pretraining
        self.model = dbn([nn.layer(4, linear, dlinear),
                          nn.layer(5, tanh, dtanh),
                          nn.layer(1, linear, dlinear, bias=False)], False)
Developer: arider | Project: riderml | Lines: 7 | Source: test_dbn.py


Example 4: main

def main():
    data_set = load_iris()['data']
    target_set = load_iris()['target']

    cartTree = CartTree(min_leaf_size=5)

    cartTree.fit(data_set, target_set)

    print(cartTree.tree)
    print(target_set)
    print(numpy.array([int(round(cartTree.predict([x]))) for x in data_set]))
Developer: shayakhmetov | Project: sfera-DataMining | Lines: 11 | Source: CART.py


Example 5: createDataSet

def createDataSet():
    dataSet = datasets.load_iris()
    iris_X = dataSet.data
    iris_y = dataSet.target
    np.random.seed(1)
    indices = np.random.permutation(len(iris_X))
    iris_X_train = iris_X[indices[:-10]]
    iris_y_train = iris_y[indices[:-10]]
    iris_X_test  = iris_X[indices[-10:]]
    iris_y_test  = iris_y[indices[-10:]]
    dataSet = datasets.load_iris()
    return iris_X_train, iris_y_train, iris_X_test, iris_y_test
Developer: LeonKennedy | Project: LearningByLanguage | Lines: 12 | Source: decisionTress.py


Example 6: test_load_iris

def test_load_iris():
    res = load_iris()
    assert_equal(res.data.shape, (150, 4))
    assert_equal(res.target.size, 150)
    assert_equal(res.target_names.size, 3)
    assert_true(res.DESCR)

    # test return_X_y option
    X_y_tuple = load_iris(return_X_y=True)
    bunch = load_iris()
    assert_true(isinstance(X_y_tuple, tuple))
    assert_array_equal(X_y_tuple[0], bunch.data)
    assert_array_equal(X_y_tuple[1], bunch.target)
Developer: NazBen | Project: scikit-learn | Lines: 13 | Source: test_base.py


Example 7: load_iris_data

def load_iris_data() :

    # load the iris dataset from the sklearn module
    iris = datasets.load_iris()

    # extract the elements of the data that are used in this exercise
    return (iris.data, iris.target, iris.target_names)
Developer: ruslan-d | Project: GA_Homework | Lines: 7 | Source: hw1.py


Example 8: testIris_proba

    def testIris_proba(self):
        random.seed(42)
        iris = datasets.load_iris()
        classifier = skflow.TensorFlowClassifier(n_classes=3)
        classifier.fit(iris.data, iris.target)
        score = log_loss(iris.target, classifier.predict_proba(iris.data))
        self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
Developer: hellios78 | Project: skflow | Lines: 7 | Source: test_base.py


Example 9: testIrisSummaries

    def testIrisSummaries(self):
        random.seed(42)
        iris = datasets.load_iris()
        classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
        classifier.fit(iris.data, iris.target, logdir='/tmp/skflow_tests/')
        score = accuracy_score(classifier.predict(iris.data), iris.target)
        self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
Developer: hellios78 | Project: skflow | Lines: 7 | Source: test_base.py


Example 10: main

def main():

    # http://scikit-learn.org/stable/tutorial/basic/tutorial.html#loading-an-example-dataset
    # "A dataset is a dictionary-like object that holds all the data and some
    # metadata about the data. This data is stored in the .data member, which
    # is a n_samples, n_features array. In the case of supervised problem, one
    # or more response variables are stored in the .target member."

    # Toy datasets

    iris = datasets.load_iris()         # The iris dataset (classification)
    digits = datasets.load_digits()     # The digits dataset (classification)

    #boston = datasets.load_boston()     # The boston house-prices dataset (regression)
    #diabetes = datasets.load_diabetes() # The diabetes dataset (regression)
    #linnerud = datasets.load_linnerud() # The linnerud dataset (multivariate regression)

    print(iris.feature_names)
    print(iris.data)
    print(iris.target_names)
    print(iris.target)

    print(digits.images[0])
    print(digits.target_names)
    print(digits.target)

    plt.imshow(digits.images[0], cmap='gray', interpolation='nearest')
    plt.show()
Developer: jeremiedecock | Project: snippets | Lines: 28 | Source: datasets.py


Example 11: test_sparse_fit_params

def test_sparse_fit_params():
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
Developer: AppliedArtificialIntelligence | Project: scikit-learn | Lines: 7 | Source: test_cross_validation.py


Example 12: test_score_memmap

def test_score_memmap():
    # Ensure a scalar score of memmap type is accepted
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    tf.write(b'Hello world!!!!!')
    tf.close()
    scores = np.memmap(tf.name, dtype=np.float64)
    score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
    try:
        cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
        # non-scalar should still fail
        assert_raises(ValueError, cross_val_score, clf, X, y,
                      scoring=lambda est, X, y: scores)
    finally:
        # Best effort to release the mmap file handles before deleting the
        # backing file under Windows
        scores, score = None, None
        for _ in range(3):
            try:
                os.unlink(tf.name)
                break
            except WindowsError:
                sleep(1.)
Developer: YinongLong | Project: scikit-learn | Lines: 25 | Source: test_validation.py


Example 13: test_classification_report_multiclass_with_digits

def test_classification_report_multiclass_with_digits():
    """Test performance report with added digits in floating point values"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support

     setosa    0.82609   0.79167   0.80851        24
 versicolor    0.33333   0.09677   0.15000        31
  virginica    0.41860   0.90000   0.57143        20

avg / total    0.51375   0.53333   0.47310        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20

avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
Developer: nateyoder | Project: scikit-learn | Lines: 32 | Source: test_classification.py


Example 14: test_classification_report_multiclass

def test_classification_report_multiclass():
    """Test performance report"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support

     setosa       0.83      0.79      0.81        24
 versicolor       0.33      0.10      0.15        31
  virginica       0.42      0.90      0.57        20

avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20

avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
Developer: nateyoder | Project: scikit-learn | Lines: 32 | Source: test_classification.py


Example 15: test_classification_report

def test_classification_report():
    """Test performance report"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support

     setosa       0.82      0.92      0.87        25
 versicolor       0.56      0.17      0.26        30
  virginica       0.47      0.90      0.62        20

avg / total       0.62      0.61      0.56        75
"""
    report = classification_report(
        y_true, y_pred, labels=range(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.82      0.92      0.87        25
          1       0.56      0.17      0.26        30
          2       0.47      0.90      0.62        20

avg / total       0.62      0.61      0.56        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
Developer: conradlee | Project: scikit-learn | Lines: 32 | Source: test_metrics.py


Example 16: test_check_estimator_clones

def test_check_estimator_clones():
    # check that check_estimator doesn't modify the estimator it receives
    from sklearn.datasets import load_iris
    iris = load_iris()

    for Estimator in [GaussianMixture, LinearRegression,
                      RandomForestClassifier, NMF, SGDClassifier,
                      MiniBatchKMeans]:
        with ignore_warnings(category=FutureWarning):
            # when 'est = SGDClassifier()'
            est = Estimator()
        set_checking_parameters(est)
        set_random_state(est)
        # without fitting
        old_hash = joblib.hash(est)
        check_estimator(est)
        assert_equal(old_hash, joblib.hash(est))

        with ignore_warnings(category=FutureWarning):
            # when 'est = SGDClassifier()'
            est = Estimator()
        set_checking_parameters(est)
        set_random_state(est)
        # with fitting
        est.fit(iris.data + 10, iris.target)
        old_hash = joblib.hash(est)
        check_estimator(est)
        assert_equal(old_hash, joblib.hash(est))
Developer: ZIP97 | Project: scikit-learn | Lines: 28 | Source: test_estimator_checks.py


Example 17: check_classifiers_input_shapes

def check_classifiers_input_shapes(name, Classifier):
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=1)
    X = StandardScaler().fit_transform(X)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    set_fast_parameters(classifier)
    set_random_state(classifier)
    # fit
    classifier.fit(X, y)
    y_pred = classifier.predict(X)

    set_random_state(classifier)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        classifier.fit(X, y[:, np.newaxis])
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    assert_equal(len(w), 1, msg)
    assert_array_equal(y_pred, classifier.predict(X))
Developer: AlexMarshall011 | Project: scikit-learn | Lines: 25 | Source: estimator_checks.py


Example 18: test_svm

def test_svm():
    iris = load_iris()
    X, Y = zip(*[(x, y) for x, y in zip(iris.data, iris.target) if y in [0, 1]])  # Select class 0 and 1 data.
    svm = SVM(C=1.0, kernel='rbf')
    svm.fit(X, Y)

    assert_almost_equal(svm.cost, 2.4034163345438264, 4)
Developer: homk | Project: watakit | Lines: 7 | Source: test_svm.py


Example 19: test_cross_val_predict_with_method

def test_cross_val_predict_with_method():
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=0)
    classes = len(set(y))

    kfold = KFold(len(iris.target))

    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        est = LogisticRegression()

        predictions = cross_val_predict(est, X, y, method=method)
        assert_equal(len(predictions), len(y))

        expected_predictions = np.zeros([len(y), classes])
        func = getattr(est, method)

        # Naive loop (should be same as cross_val_predict):
        for train, test in kfold.split(X, y):
            est.fit(X[train], y[train])
            expected_predictions[test] = func(X[test])

        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold)
        assert_array_almost_equal(expected_predictions, predictions)
Developer: 447327642 | Project: scikit-learn | Lines: 26 | Source: test_validation.py


Example 20: test_pipeline_methods_preprocessing_svm

def test_pipeline_methods_preprocessing_svm():
    # Test the various methods of the pipeline (preprocessing + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = RandomizedPCA(n_components=2, whiten=True)
    clf = SVC(probability=True, random_state=0)

    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)

        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert_equal(predict.shape, (n_samples,))

        proba = pipe.predict_proba(X)
        assert_equal(proba.shape, (n_samples, n_classes))

        log_proba = pipe.predict_log_proba(X)
        assert_equal(log_proba.shape, (n_samples, n_classes))

        decision_function = pipe.decision_function(X)
        assert_equal(decision_function.shape, (n_samples, n_classes))

        pipe.score(X, y)
Developer: Givonaldo | Project: scikit-learn | Lines: 29 | Source: test_pipeline.py



Note: The sklearn.datasets.load_iris examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the License of the corresponding project. Do not reproduce without permission.

