Python feature_selection.SequentialFeatureSelector Class Code Examples


This article collects and summarizes typical usage examples of Python's mlxtend.feature_selection.SequentialFeatureSelector class. If you have been wondering what the SequentialFeatureSelector class does, how to call it, or what working examples look like, the curated class code examples below should help.



The following presents 20 code examples of the SequentialFeatureSelector class, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
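
Before the collected examples, here is a minimal usage sketch of the SequentialFeatureSelector workflow. It assumes the iris toy dataset and a k-NN base estimator purely for illustration; the parameter values are illustrative and not taken from any of the examples below.

from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier

# Illustrative data and estimator only; substitute your own.
X, y = load_iris(return_X_y=True)
knn = KNeighborsClassifier(n_neighbors=4)

sfs = SFS(knn,
          k_features=3,        # stop once 3 features are selected
          forward=True,        # sequential forward selection
          floating=False,      # no floating (conditional exclusion) step
          scoring='accuracy',
          cv=5)
sfs = sfs.fit(X, y)

print(sfs.k_feature_idx_)   # indices of the selected features
print(sfs.k_score_)         # cross-validated score of that subset
print(sfs.subsets_)         # per-step metric dictionary

Note that several of the examples below were written against older library releases: they use mlxtend's former print_progress and skip_if_stuck arguments (newer mlxtend versions use verbose instead) and scikit-learn's pre-0.20 cross_validation module.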

Example 1: get_best_logisitc

def get_best_logisitc(y):
    # Written against older library versions: sklearn's cross_validation
    # module (removed in scikit-learn 0.20) and an mlxtend release that still
    # accepted the print_progress argument.
    from mlxtend.feature_selection import SequentialFeatureSelector as SFS
    from sklearn.cross_validation import StratifiedKFold
    from sklearn.cross_validation import cross_val_score
    from sklearn.linear_model import LogisticRegression
    import pandas as pd

    my_data = pd.read_csv('data/my_data_test.csv', encoding='utf-8')

    # Note: the y argument is immediately overwritten by the target column.
    y = my_data.target
    my_data = my_data.drop('target', axis=1)

    # Stratified folds for a more reliable CV estimate
    skf = StratifiedKFold(y, n_folds=5, random_state=17, shuffle=False)

    C_params = [0.01, 1, 10, 50, 70, 100]
    solvers = ['newton-cg', 'lbfgs', 'liblinear', 'sag']

    my_result_list = []
    for C_param in C_params:
        for solver in solvers:
            print("Looking for C : %s and solver : %s" % (C_param, solver))
            model = LogisticRegression(class_weight='balanced', random_state=17,
                                       solver=solver, C=C_param)
            sfs = SFS(model,
                      k_features=len(my_data.columns),
                      forward=True,
                      floating=False,
                      scoring='roc_auc',
                      print_progress=False,
                      cv=skf,
                      n_jobs=-1)

            sfs = sfs.fit(my_data.values, y.values)

            # Rank all evaluated subsets by mean CV score and keep the best one
            result_sfs = pd.DataFrame.from_dict(sfs.get_metric_dict()).T
            result_sfs.sort_values('avg_score', ascending=False, inplace=True)
            best_idx = result_sfs.feature_idx.iloc[0]
            select_features_sfs = list(my_data.columns[list(best_idx)])

            scores = cross_val_score(model, my_data[select_features_sfs], y,
                                     cv=skf, scoring='roc_auc')
            my_result_list.append({'C': C_param,
                                   'solver': solver,
                                   'auc': scores.mean(),
                                   'std': scores.std(),
                                   'best_columns': select_features_sfs,
                                   'estimator': model})

    my_result = pd.DataFrame(my_result_list)
    my_result.sort_values('auc', ascending=False, inplace=True)

    best_features = my_result.best_columns.head(1).values[0]
    best_model = my_result.estimator.head(1).values[0]

    return best_features, best_model
Developer: armgilles, Project: frateli, Lines: 57, Source: utils.py


Example 2: test_knn_scoring_metric

def test_knn_scoring_metric():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs5 = SFS(knn,
               k_features=3,
               forward=False,
               floating=True,
               scoring='accuracy',
               cv=4,
               skip_if_stuck=True,
               print_progress=False)
    sfs5 = sfs5.fit(X, y)
    assert round(sfs5.k_score_, 4) == 0.9728

    sfs6 = SFS(knn,
               k_features=3,
               forward=False,
               floating=True,
               scoring='precision',
               cv=4,
               skip_if_stuck=True,
               print_progress=False)
    sfs6 = sfs6.fit(X, y)
    assert round(sfs6.k_score_, 4) == 0.9737
Developer: GQiuQi, Project: mlxtend, Lines: 26, Source: test_sequential_feature_selector.py


Example 3: test_knn_cv3

def test_knn_cv3():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs1 = SFS(knn,
               k_features=3,
               forward=True,
               floating=False,
               scoring='accuracy',
               cv=4,
               skip_if_stuck=True,
               print_progress=False)
    sfs1 = sfs1.fit(X, y)
    sfs1.subsets_
    expect = {1: {'avg_score': 0.95299145299145294,
                  'cv_scores': np.array([0.97435897,
                                         0.94871795,
                                         0.88888889,
                                         1.0]),
                  'feature_idx': (3,)},
              2: {'avg_score': 0.95993589743589736,
                  'cv_scores': np.array([0.97435897,
                                         0.94871795,
                                         0.91666667,
                                         1.0]),
                  'feature_idx': (2, 3)},
              3: {'avg_score': 0.97275641025641035,
                  'cv_scores': np.array([0.97435897,
                                         1.0,
                                         0.94444444,
                                         0.97222222]),
                  'feature_idx': (1, 2, 3)}}
    dict_compare_utility(d1=expect, d2=sfs1.subsets_)
Developer: GQiuQi, Project: mlxtend, Lines: 34, Source: test_sequential_feature_selector.py


Example 4: test_knn_cv3_groups

def test_knn_cv3_groups():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs1 = SFS(knn,
               k_features=3,
               forward=True,
               floating=False,
               cv=GroupKFold(n_splits=3),
               verbose=0)
    np.random.seed(1630672634)
    groups = np.random.randint(0, 6, size=len(y))
    sfs1 = sfs1.fit(X, y, groups=groups)
    # print(sfs1.subsets_)
    expect = {
        1: {'cv_scores': np.array([0.97916667, 0.93877551, 0.96226415]),
            'feature_idx': (3,),
            'avg_score': 0.9600687759380482},
        2: {'cv_scores': np.array([0.95833333, 0.93877551, 0.98113208]),
            'feature_idx': (1, 3),
            'avg_score': 0.9594136396697044},
        3: {'cv_scores': np.array([0.97916667, 0.95918367, 0.94339623]),
            'feature_idx': (1, 2, 3),
            'avg_score': 0.9605821888503829}}
    dict_compare_utility(d_actual=sfs1.subsets_, d_desired=expect, decimal=3)
Developer: rasbt, Project: mlxtend, Lines: 26, Source: test_sequential_feature_selector.py


Example 5: test_knn_cv3

def test_knn_cv3():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs1 = SFS(knn,
               k_features=3,
               forward=True,
               floating=False,
               cv=4,
               verbose=0)
    sfs1 = sfs1.fit(X, y)
    sfs1.subsets_
    expect = {1: {'avg_score': 0.95299145299145294,
                  'cv_scores': np.array([0.97435897,
                                         0.94871795,
                                         0.88888889,
                                         1.0]),
                  'feature_idx': (3,)},
              2: {'avg_score': 0.95993589743589736,
                  'cv_scores': np.array([0.97435897,
                                         0.94871795,
                                         0.91666667,
                                         1.0]),
                  'feature_idx': (2, 3)},
              3: {'avg_score': 0.97275641025641035,
                  'cv_scores': np.array([0.97435897,
                                         1.0,
                                         0.94444444,
                                         0.97222222]),
                  'feature_idx': (1, 2, 3)}}
    dict_compare_utility(d_actual=sfs1.subsets_, d_desired=expect)
Developer: rasbt, Project: mlxtend, Lines: 32, Source: test_sequential_feature_selector.py


Example 6: test_run_default

def test_run_default():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier()
    sfs = SFS(estimator=knn,
              verbose=0)
    sfs.fit(X, y)
    assert sfs.k_feature_idx_ == (3,)
Developer: rasbt, Project: mlxtend, Lines: 9, Source: test_sequential_feature_selector.py


Example 7: test_fit_params

def test_fit_params():
    iris = load_iris()
    X = iris.data
    y = iris.target
    sample_weight = np.ones(X.shape[0])
    forest = RandomForestClassifier(n_estimators=100, random_state=123)
    sfs = SFS(estimator=forest,
              verbose=0)
    sfs.fit(X, y, sample_weight=sample_weight)
    assert sfs.k_feature_idx_ == (3,)
Developer: rasbt, Project: mlxtend, Lines: 10, Source: test_sequential_feature_selector.py


Example 8: test_max_feature_subset_parsimonious

def test_max_feature_subset_parsimonious():
    boston = load_boston()
    X, y = boston.data, boston.target
    lr = LinearRegression()

    sfs = SFS(lr,
              k_features='parsimonious',
              forward=True,
              floating=False,
              cv=10)

    sfs = sfs.fit(X, y)
    assert sfs.k_feature_idx_ == (5, 10, 11, 12)
Developer: rasbt, Project: mlxtend, Lines: 13, Source: test_sequential_feature_selector.py


Example 9: test_regression_sbfs

def test_regression_sbfs():
    boston = load_boston()
    X, y = boston.data, boston.target
    lr = LinearRegression()
    sfs_r = SFS(lr,
                k_features=3,
                forward=False,
                floating=True,
                scoring='neg_mean_squared_error',
                cv=10,
                verbose=0)
    sfs_r = sfs_r.fit(X, y)
    assert sfs_r.k_feature_idx_ == (7, 10, 12), sfs_r.k_feature_idx_
Developer: rasbt, Project: mlxtend, Lines: 13, Source: test_sequential_feature_selector.py


Example 10: test_knn_option_sfbs

def test_knn_option_sfbs():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs4 = SFS(knn,
               k_features=3,
               forward=False,
               floating=True,
               cv=4,
               verbose=0)
    sfs4 = sfs4.fit(X, y)
    assert sfs4.k_feature_idx_ == (1, 2, 3)
Developer: rasbt, Project: mlxtend, Lines: 13, Source: test_sequential_feature_selector.py


Example 11: test_max_feature_subset_best

def test_max_feature_subset_best():
    boston = load_boston()
    X, y = boston.data, boston.target
    lr = LinearRegression()

    sfs = SFS(lr,
              k_features='best',
              forward=True,
              floating=False,
              cv=10)

    sfs = sfs.fit(X, y)
    assert sfs.k_feature_idx_ == (1, 3, 5, 7, 8, 9, 10, 11, 12)
Developer: rasbt, Project: mlxtend, Lines: 13, Source: test_sequential_feature_selector.py


Example 12: test_knn_option_sbs_tuplerange_1

def test_knn_option_sbs_tuplerange_1():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=3)
    sfs4 = SFS(knn,
               k_features=(1, 3),
               forward=False,
               floating=False,
               cv=4,
               verbose=0)
    sfs4 = sfs4.fit(X, y)
    assert round(sfs4.k_score_, 3) == 0.967, sfs4.k_score_
    assert sfs4.k_feature_idx_ == (0, 2, 3), sfs4.k_feature_idx_
Developer: rasbt, Project: mlxtend, Lines: 14, Source: test_sequential_feature_selector.py


Example 13: test_regression_in_range

def test_regression_in_range():
    boston = load_boston()
    X, y = boston.data, boston.target
    lr = LinearRegression()
    sfs_r = SFS(lr,
                k_features=(1, 13),
                forward=True,
                floating=False,
                scoring='neg_mean_squared_error',
                cv=10,
                verbose=0)
    sfs_r = sfs_r.fit(X, y)
    assert len(sfs_r.k_feature_idx_) == 9
    assert round(sfs_r.k_score_, 4) == -31.1537
Developer: JJLWHarrison, Project: mlxtend, Lines: 14, Source: test_sequential_feature_selector.py


Example 14: test_regression

def test_regression():
    boston = load_boston()
    X, y = boston.data, boston.target
    lr = LinearRegression()
    sfs_r = SFS(lr,
                k_features=13,
                forward=True,
                floating=False,
                scoring='mean_squared_error',
                cv=10,
                skip_if_stuck=True,
                print_progress=False)
    sfs_r = sfs_r.fit(X, y)
    assert round(sfs_r.k_score_, 4) == -34.7631
Developer: GQiuQi, Project: mlxtend, Lines: 14, Source: test_sequential_feature_selector.py


Example 15: test_knn_rbf_groupkfold

def test_knn_rbf_groupkfold():
    nan_roc_auc_scorer = make_scorer(nan_roc_auc_score)
    rng = np.random.RandomState(123)
    iris = load_iris()
    X = iris.data
    # knn = KNeighborsClassifier(n_neighbors=4)
    forest = RandomForestClassifier(n_estimators=100, random_state=123)
    bool_01 = [True if item == 0 else False for item in iris['target']]
    bool_02 = [True if (item == 1 or item == 2) else False for item in
               iris['target']]
    groups = []
    y_new = []
    for ind, _ in enumerate(bool_01):
        if bool_01[ind]:
            groups.append('attribute_A')
            y_new.append(0)
        if bool_02[ind]:
            throw = rng.rand()
            if throw < 0.5:
                groups.append('attribute_B')
            else:
                groups.append('attribute_C')
            throw2 = rng.rand()
            if throw2 < 0.5:
                y_new.append(0)
            else:
                y_new.append(1)
    y_new_bool = [True if item == 1 else False for item in y_new]
    cv_obj = GroupKFold(n_splits=3)
    cv_obj_list = list(cv_obj.split(X, np.array(y_new_bool), groups))
    sfs1 = SFS(forest,
               k_features=3,
               forward=True,
               floating=False,
               cv=cv_obj_list,
               scoring=nan_roc_auc_scorer,
               verbose=0
               )
    sfs1 = sfs1.fit(X, y_new)
    expect = {
        1: {'cv_scores': np.array([0.52, np.nan, 0.72]), 'avg_score': 0.62,
            'feature_idx': (1,)},
        2: {'cv_scores': np.array([0.42, np.nan, 0.65]), 'avg_score': 0.53,
            'feature_idx': (1, 2)},
        3: {'cv_scores': np.array([0.47, np.nan, 0.63]),
            'avg_score': 0.55,
            'feature_idx': (1, 2, 3)}}

    dict_compare_utility(d_actual=sfs1.subsets_, d_desired=expect, decimal=1)
Developer: rasbt, Project: mlxtend, Lines: 49, Source: test_sequential_feature_selector.py


Example 16: test_max_feature_subset_size_in_tuple_range

def test_max_feature_subset_size_in_tuple_range():
    boston = load_boston()
    X, y = boston.data, boston.target

    lr = LinearRegression()

    sfs = SFS(lr,
              k_features=(1, 5),
              forward=False,
              floating=True,
              scoring='neg_mean_squared_error',
              cv=10)

    sfs = sfs.fit(X, y)
    assert len(sfs.k_feature_idx_) == 5
Developer: rasbt, Project: mlxtend, Lines: 15, Source: test_sequential_feature_selector.py


Example 17: test_knn_option_sffs

def test_knn_option_sffs():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs2 = SFS(knn,
               k_features=3,
               forward=True,
               floating=True,
               scoring='accuracy',
               cv=4,
               skip_if_stuck=True,
               verbose=0)
    sfs2 = sfs2.fit(X, y)
    assert sfs2.k_feature_idx_ == (1, 2, 3)
Developer: vdthatte, Project: mlxtend, Lines: 15, Source: test_sequential_feature_selector.py


Example 18: test_knn_option_sfs

def test_knn_option_sfs():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=4)
    sfs1 = SFS(knn,
               k_features=3,
               forward=True,
               floating=False,
               scoring='accuracy',
               cv=4,
               skip_if_stuck=True,
               print_progress=False)
    sfs1 = sfs1.fit(X, y)
    assert sfs1.k_feature_idx_ == (1, 2, 3)
Developer: GQiuQi, Project: mlxtend, Lines: 15, Source: test_sequential_feature_selector.py


Example 19: test_knn_option_sfbs_tuplerange_2

def test_knn_option_sfbs_tuplerange_2():
    iris = load_iris()
    X = iris.data
    y = iris.target
    knn = KNeighborsClassifier(n_neighbors=3)
    sfs4 = SFS(knn,
               k_features=(1, 4),
               forward=False,
               floating=True,
               scoring='accuracy',
               cv=4,
               skip_if_stuck=True,
               verbose=0)
    sfs4 = sfs4.fit(X, y)
    assert round(sfs4.k_score_, 3) == 0.966, sfs4.k_score_
    assert sfs4.k_feature_idx_ == (1, 2, 3), sfs4.k_feature_idx_
Developer: vdthatte, Project: mlxtend, Lines: 16, Source: test_sequential_feature_selector.py


Example 20: test_clone_params_pass

def test_clone_params_pass():
    iris = load_iris()
    X = iris.data
    y = iris.target
    lr = SoftmaxRegression(random_seed=1)
    sfs1 = SFS(lr,
               k_features=2,
               forward=True,
               floating=False,
               scoring='accuracy',
               cv=0,
               clone_estimator=True,
               verbose=0,
               n_jobs=1)
    sfs1 = sfs1.fit(X, y)
    assert (sfs1.k_feature_idx_ == (1, 3))
Developer: rasbt, Project: mlxtend, Lines: 16, Source: test_sequential_feature_selector.py



Note: The mlxtend.feature_selection.SequentialFeatureSelector class examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not republish without permission.

