
Python model_selection.cross_val_predict Function Code Examples


This article collects and summarizes typical usage examples of the Python function sklearn.model_selection.cross_val_predict. If you have been wondering how exactly cross_val_predict is used, how to call it, or where to find real examples of it in action, the hand-picked code samples below may help.



The following shows 20 code examples of the cross_val_predict function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
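Before turning to the project-specific examples, here is a minimal, self-contained sketch (not drawn from any of the projects below; the dataset and estimator are arbitrary choices for illustration) of what cross_val_predict does: for each cross-validation fold it fits the estimator on the remaining folds and predicts the held-out fold, so every sample receives an out-of-fold prediction.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_predict

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000)

# Each entry of y_pred comes from a model that never saw that sample during fitting.
y_pred = cross_val_predict(clf, X, y, cv=5)
print("Out-of-fold accuracy:", accuracy_score(y, y_pred))

# method='predict_proba' returns out-of-fold class probabilities instead of labels.
y_proba = cross_val_predict(clf, X, y, cv=5, method='predict_proba')
print("Probability matrix shape:", y_proba.shape)  # (150, 3) for iris

These out-of-fold predictions are meant for inspecting a model (plotting predictions against targets, or building stacked features, as several examples below do), not as a substitute for evaluation on a separate held-out test set.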

Example 1: Random_forest

def Random_forest(features,target,test_size_percent=0.2,cv_split=3):
    X_array = features.values
    y_array = target.values
    model_rdf = RandomForestRegressor()
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
    model_rdf.fit(X_train,y_train)
    test_prediction = model_rdf.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)
    
    training_score = cross_val_score(model_rdf,X_train,y_train,cv=tscv.n_splits) 
    testing_score = cross_val_score(model_rdf,X_test,y_test,cv=tscv.n_splits)
    print"Cross-val Training score:", training_score.mean()
#    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(model_rdf,X_train,y_train,cv=tscv.n_splits)
    testing_predictions = cross_val_predict(model_rdf,X_test,y_test,cv=tscv.n_splits)
    
    training_accuracy = metrics.r2_score(y_train,training_predictions) 
#    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test,testing_predictions)
    
#    print"Cross-val predicted accuracy:", training_accuracy
    print"Test-predictions accuracy:",test_accuracy

    plot_model(target,y_train,y_test,training_predictions,testing_predictions)
    return model_rdf
Developer: SOLIMAN68 | Project: Data-driven_Building_simulation_Polimi_EETBS | Lines of code: 25 | Source file: master_1_4_eachBuilding_allModels.py


Example 2: svm_regressor

def svm_regressor(features,target,test_size_percent=0.2,cv_split=5):
    
    scale=preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)  
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
    svr = SVR(kernel='rbf',C=10,gamma=1)
    svr.fit(X_train,y_train.ravel())
    test_prediction = svr.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)
    
    training_score = cross_val_score(svr,X_train,y_train,cv=tscv.n_splits) 
    testing_score = cross_val_score(svr,X_test,y_test,cv=tscv.n_splits)
    print"Cross-val Training score:", training_score.mean()
#    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(svr,X_train,y_train,cv=tscv.n_splits)
    testing_predictions = cross_val_predict(svr,X_test,y_test,cv=tscv.n_splits)
    
    training_accuracy = metrics.r2_score(y_train,training_predictions) 
#    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test,testing_predictions)
    
#    print"Cross-val predicted accuracy:", training_accuracy
    print"Test-predictions accuracy:",test_accuracy
    return svr
Developer: SOLIMAN68 | Project: Data-driven_Building_simulation_Polimi_EETBS | Lines of code: 25 | Source file: master_1_4_eachBuilding_allModels.py


Example 3: test_cross_val_predict_with_method

def test_cross_val_predict_with_method():
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=0)
    classes = len(set(y))

    kfold = KFold(len(iris.target))

    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        est = LogisticRegression()

        predictions = cross_val_predict(est, X, y, method=method)
        assert_equal(len(predictions), len(y))

        expected_predictions = np.zeros([len(y), classes])
        func = getattr(est, method)

        # Naive loop (should be same as cross_val_predict):
        for train, test in kfold.split(X, y):
            est.fit(X[train], y[train])
            expected_predictions[test] = func(X[test])

        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold)
        assert_array_almost_equal(expected_predictions, predictions)
Developer: 447327642 | Project: scikit-learn | Lines of code: 26 | Source file: test_validation.py


Example 4: linear_regression

def linear_regression(features,target,test_size_percent=0.2,cv_split=5):
    ''' Features -> Pandas Dataframe with attributes as columns
        target -> Pandas Dataframe with target column for prediction
        Test_size_percent -> Percentage of data point to be used for testing'''
    X_array = features.values
    y_array = target.values
    ols = linear_model.LinearRegression()
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
#    model = ols.fit(X_train, y_train)
    ols.fit(X_train, y_train)
#    test_prediction_model = ols.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)
    
    training_score = cross_val_score(ols,X_train,y_train,cv=tscv.n_splits) 
    testing_score = cross_val_score(ols,X_test,y_test,cv=tscv.n_splits)
    print"Cross-val Training score:", training_score.mean()
#    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(ols,X_train,y_train,cv=tscv.n_splits)
    testing_predictions = cross_val_predict(ols,X_test,y_test,cv=tscv.n_splits)
    
    training_accuracy = metrics.r2_score(y_train,training_predictions) 
#    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test,testing_predictions)
    
#    print"Cross-val predicted accuracy:", training_accuracy
    print"Test-predictions accuracy:",test_accuracy

    plot_model(target,y_train,y_test,training_predictions,testing_predictions)
    return ols
Developer: SOLIMAN68 | Project: Data-driven_Building_simulation_Polimi_EETBS | Lines of code: 29 | Source file: master_1_4_eachBuilding_allModels.py


Example 5: neural_net

def neural_net(features,target,test_size_percent=0.2,cv_split=3,n_iter=100,learning_rate=0.01):
    '''Features -> Pandas Dataframe with attributes as columns
        target -> Pandas Dataframe with target column for prediction
        Test_size_percent -> Percentage of data point to be used for testing'''
    scale=preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)
    mlp = Regressor(layers=[Layer("Rectifier",units=5), # Hidden Layer1
                            Layer("Rectifier",units=3)  # Hidden Layer2
                            ,Layer("Linear")],     # Output Layer
                        n_iter=n_iter, learning_rate=learning_rate)
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(), test_size=test_size_percent, random_state=4)
    mlp.fit(X_train,y_train)
    test_prediction = mlp.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)
    
    training_score = cross_val_score(mlp,X_train,y_train,cv=tscv.n_splits) 
    testing_score = cross_val_score(mlp,X_test,y_test,cv=tscv.n_splits)
    print"Cross-val Training score:", training_score.mean()
#    print"Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp,X_train,y_train,cv=tscv.n_splits)
    testing_predictions = cross_val_predict(mlp,X_test,y_test,cv=tscv.n_splits)
    
    training_accuracy = metrics.r2_score(y_train,training_predictions) 
#    test_accuracy_model = metrics.r2_score(y_test,test_prediction_model)
    test_accuracy = metrics.r2_score(y_test,testing_predictions)
    
#    print"Cross-val predicted accuracy:", training_accuracy
    print"Test-predictions accuracy:",test_accuracy

    plot_model(target,y_train,y_test,training_predictions,testing_predictions)
    return mlp
Developer: SOLIMAN68 | Project: Data-driven_Building_simulation_Polimi_EETBS | Lines of code: 32 | Source file: master_1_4_eachBuilding_allModels.py


Example 6: scan2D

def scan2D(X, y, window=(10, 10), estimator_params=dict(n_jobs=-1), cv=3):
    "2D scanning"
    inputs, labels, instances = [], [], []
    instance_count = 0
    for sample, label in zip(X, y):
        sample_shape = sample.shape
        for s1 in range(sample.shape[0]-window[0]):
            for s2 in range(sample.shape[1]-window[1]):
                part = sample[s1:s1+window[0], s2:s2+window[1]]
                inputs.append(part.flatten())
                labels.append(label)
                instances.append(instance_count)
        instance_count += 1
    rf = RandomForestClassifier(**estimator_params)
    estimator_params.update({'max_features': 1})
    cf = RandomForestClassifier(**estimator_params)
    probas1 = cross_val_predict(rf, inputs, labels, cv=cv, method='predict_proba')
    probas2 = cross_val_predict(cf, inputs, labels, cv=cv, method='predict_proba')
    probas = []
    for instance in set(instances):
        mask = [i == instance for i in instances]
        p1 = probas1[mask]
        p2 = probas2[mask]
        p = np.concatenate([p1.flatten(), p2.flatten()], axis=0)
        probas.append(p)
    return probas
Developer: sig-ml | Project: bleedml | Lines of code: 26 | Source file: utils.py


Example 7: fit

    def fit(self, X, y):
        # Check data
        X, y = np.array(X), np.array(y)
        X, y = check_X_y(X, y)
        # Split to grow cascade and validate
        mask = np.random.random(y.shape[0]) < self.validation_fraction
        X_tr, X_vl = X[mask], X[~mask]
        y_tr, y_vl = y[mask], y[~mask]

        self.classes_ = unique_labels(y)
        self.layers_, inp_tr, inp_vl = [], X_tr, X_vl
        self.scores_ = []

        # First layer
        forests = [RandomForestClassifier(max_features=1, n_estimators=self.n_estimators, min_samples_split=10, criterion='gini', n_jobs=-1),  # Complete random
                    RandomForestClassifier(max_features=1, n_estimators=self.n_estimators, min_samples_split=10, criterion='gini', n_jobs=-1),  # Complete random
                    RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1),
                    RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1)]
        _ = [f.fit(inp_tr, y_tr) for f in forests]
        p_vl = [f.predict_proba(inp_vl) for f in forests]
        labels = [self.classes_[i] for i in np.argmax(np.array(p_vl).mean(axis=0), axis=1)]
        score = self.scoring(y_vl, labels)
        self.layers_.append(forests)
        self.scores_.append(score)
        p_tr = [cross_val_predict(f, inp_tr, y_tr, cv=self.cv, method='predict_proba') for f in forests]

        # Fit other layers
        last_score = score
        inp_tr, inp_vl = np.concatenate([X_tr]+p_tr, axis=1), np.concatenate([X_vl]+p_vl, axis=1)
        while True:  # Grow cascade
            forests = [RandomForestClassifier(max_features=1, n_estimators=self.n_estimators, min_samples_split=10, criterion='gini', n_jobs=-1),  # Complete random
                    RandomForestClassifier(max_features=1, n_estimators=self.n_estimators, min_samples_split=10, criterion='gini', n_jobs=-1),  # Complete random
                    RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1),
                    RandomForestClassifier(n_estimators=self.n_estimators, n_jobs=-1)]
            _ = [forest.fit(inp_tr, y_tr) for forest in forests] # Fit the forest
            p_vl = [forest.predict_proba(inp_vl) for forest in forests]
            labels = [self.classes_[i] for i in np.argmax(np.array(p_vl).mean(axis=0), axis=1)]
            score = self.scoring(y_vl, labels)

            if score - last_score > self.tolerance:
                self.layers_.append(forests)
                p_tr = [cross_val_predict(f, inp_tr, y_tr, cv=self.cv, method='predict_proba') for f in forests]
                inp_tr, inp_vl = np.concatenate([X_tr]+p_tr, axis=1), np.concatenate([X_vl]+p_vl, axis=1)
                self.scores_.append(score)
                last_score = score
                print(self.scores_)
            else:
                break
        # Retrain on entire dataset
        inp_ = X
        for forests in self.layers_:
            _ = [f.fit(inp_, y) for f in forests]
            p = [cross_val_predict(f, inp_, y, cv=self.cv, method='predict_proba') for f in forests]
            inp_ = np.concatenate([X]+p, axis=1)
        return self
Developer: sig-ml | Project: bleedml | Lines of code: 55 | Source file: classifiers.py


Example 8: test_cross_val_predict_sparse_prediction

def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
Developer: 447327642 | Project: scikit-learn | Lines of code: 13 | Source file: test_validation.py


Example 9: test_cross_val_predict_pandas

def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y2)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_predict(clf, X_df, y_ser)
Developer: 447327642 | Project: scikit-learn | Lines of code: 15 | Source file: test_validation.py


Example 10: fit

 def fit(self, x, y, **params):
     """ fit training data """
     preds = []
     for i, clf in enumerate(self.clfs):
         log.info("fit %s"%i)
         if "Keras" in str(clf) and "verbose" in params:
             params["fit_params"] = dict(verbose=params["verbose"])
             
         # save out-of-fold predictions to fit metaclf
         if hasattr(clf, "predict_proba"):
             method = "predict_proba"
         else:
             method = "predict"
         pred = cross_val_predict(clf, x, y, 
                                  cv=self.cv, verbose=0,
                                  method=method,
                                  **params)        
         preds.append(pred)
         
         # fully fitted to predict test data
         clf.fit(x, y, verbose=0)
     
     # fit metaclf on out-of-fold predictions
     log.info("fit metaclf")
     self.metaclf.fit(np.hstack(preds), y)
     return self
Developer: simonm3 | Project: analysis | Lines of code: 26 | Source file: classifiers.py


Example 11: crossval

 def crossval(self, verbose=0, seed=0, method="predict", **params):
     """ returns crossval score
         sets self.preds
     """
     # track time spent per run
     starttime = time()
     
     np.random.seed(seed)         
 
     # useful for keras but throws exception for others
     if "Keras" in get_clfname(self.clf):
         self.clf.set_params(verbose=verbose)
         
     self.clf.set_params(**params)
     
     self.preds = cross_val_predict(self.clf, self.xtrain, self.ytrain,
                                    method=method)
     score = self.scorer._score_func(self.ytrain, self.preds) \
                     * self.scorer._sign
     
     # log results
     params.update(clf=get_clfname(self.clf),
                   name=self.name,
                   score=score, 
                   elapsed=time()-starttime)
     if self.runs:
         self.runs.append(params, self.preds)
         
     return score
Developer: simonm3 | Project: analysis | Lines of code: 29 | Source file: model.py


Example 12: test_cross_val_predict_input_types

def test_cross_val_predict_input_types():
    clf = Ridge()
    # Smoke test
    predictions = cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))

    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))

    predictions = cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))

    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cross_val_predict(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    predictions = cross_val_predict(clf, X, y.tolist())

    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
Developer: AlexanderFabisch | Project: scikit-learn | Lines of code: 31 | Source file: test_validation.py


Example 13: crossVertifyTestData

 def crossVertifyTestData(self):
     """
     Cross-validate the test data and return the results
         :param self: the class instance itself
         :returns: the true y and the predicted y, with the true y first
     """   
     # Run cross-validation
     predict_y = cross_val_predict(self.model, self.test_X, self.test_y, cv=10)
     return self.test_y, predict_y
Developer: WQ-huziang | Project: WQ-Testcode | Lines of code: 9 | Source file: modelEngineer.py


Example 14: _get_estimator_mse

    def _get_estimator_mse(self, x, y, estimator):
        """Return the RMSE for *estimator*.

        Use GroupKFold where a group is a combination of input size and number
        of workers. The prediction of a group is done when it is out of the
        training set.
        """
        groups = self._groups.loc[x.index]
        cv = GroupKFold(n_splits=3)
        prediction = cross_val_predict(estimator, x, y, groups, cv)
        return metrics.mean_squared_error(y, prediction)
Developer: cemsbr | Project: phd_notebook | Lines of code: 11 | Source file: notebook005.py


Example 15: save_fit_plot

def save_fit_plot(x, y, fit, name, folder):
    predicted = cross_val_predict(fit, x, y, cv=10)
    linfit = np.polyfit(y, predicted, 1)

    fig, ax = plt.subplots()
    ax.scatter(y, predicted, s=1, alpha=0.1)
    ax.plot([y.min(), y.max()], [y.min(), y.max()], "k--", lw=2)
    ax.plot(y, np.poly1d(linfit)(y), "g--", lw=2)
    ax.set_xlabel("Measured")
    ax.set_ylabel("Predicted")
    f_name = timed_filename(name, "pdf")
    plt.savefig(os.path.join(folder, f_name))
Developer: Geonovum | Project: smartemission | Lines of code: 12 | Source file: data.py


Example 16: evaluate

    def evaluate(self, exp):
        """Split data, fit, transfrom features, tf*idf, svd, report."""
        t1 = time()

        exp.seed = 42
        exp.nj = -1
        exp.test_size = 0.3 if not hasattr(exp, 'test_size') else exp.test_size
        np.random.RandomState(exp.seed)

        # report features
        if hasattr(exp.pln[0], 'features'):
            exp.log.head(exp.pln.features, exp.name, exp.seed)

        # stream data to features
        X, y = exp.vec.fit_transform(exp.data)

        # if no test data, split
        if not hasattr(self, 'test_data'):
            X, Xi, y, yi = train_test_split(
                X, y, test_size=exp.test_size, stratify=y)
        else:
            Xi, yi = exp.vec.transform(self.test_data)

        av = self.average
        # grid search and fit best model choice
        exp.pln = self.grid_search(exp.pln, X, y, exp.seed)
        print("\n Training model...")
        exp.pln.fit(X, y)
        print(" done!")

        labs = exp.vec.encoder.classes_
        exp.log.data('sparse', 'train', X)

        # if user wants to report more than best score, do another CV on train
        # if hasattr(self, 'detailed_train'):
        sco = cross_val_predict(exp.pln, X, y, cv=self.cv, n_jobs=exp.nj)
        self.res['train'] = exp.log.report('train', y, sco, av, labs)

        exp.log.data('sparse', 'test', Xi, dump=True)
        res = exp.pln.predict(Xi)
        self.res['test'] = exp.log.report('test', yi, res, av, labs)

        if hasattr(self, 'proportions'):
            self._run_proportions((X, Xi, y, yi), exp)

        print("\n # ------------------------------------------ \n")
        t2 = time()
        dur = round(t2 - t1, 1)
        self.res['dur'] = dur
        print("\n Experiment took {0} seconds".format(dur))

        exp.store()
        print("\n" + '-' * 10, "\n")
Developer: cmry | Project: omesa | Lines of code: 53 | Source file: components.py


Example 17: test_cross_val_predict

def test_cross_val_predict():
    """Test cross_val_predict with predict_proba."""
    from sklearn.linear_model import LinearRegression
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.base import BaseEstimator, clone
    from sklearn.model_selection import cross_val_predict
    rng = np.random.RandomState(42)
    X = rng.randn(10, 1, 3)
    y = rng.randint(0, 2, 10)

    estimator = SlidingEstimator(LinearRegression())
    cross_val_predict(estimator, X, y, cv=2)

    class Classifier(BaseEstimator):
        """Moch class that does not have classes_ attribute."""

        def __init__(self):
            self.base_estimator = LinearDiscriminantAnalysis()

        def fit(self, X, y):
            self.estimator_ = clone(self.base_estimator).fit(X, y)
            return self

        def predict_proba(self, X):
            return self.estimator_.predict_proba(X)

    with pytest.raises(AttributeError, match="classes_ attribute"):
        estimator = SlidingEstimator(Classifier())
        cross_val_predict(estimator, X, y, method='predict_proba', cv=2)

    estimator = SlidingEstimator(LinearDiscriminantAnalysis())
    cross_val_predict(estimator, X, y, method='predict_proba', cv=2)
Developer: Eric89GXL | Project: mne-python | Lines of code: 32 | Source file: test_search_light.py


Example 18: test_cross_val_predict

def test_cross_val_predict():
    """Make sure it works in cross_val_predict."""

    X, y = load_iris(return_X_y=True)
    X = StandardScaler().fit_transform(X)

    clf = FMClassifier(rank=2, solver='L-BFGS-B', random_state=4567).fit(X, y)

    cv = KFold(n_splits=4, random_state=457, shuffle=True)
    y_oos = cross_val_predict(clf, X, y, cv=cv, method='predict')
    acc = accuracy_score(y, y_oos)

    assert acc >= 0.90, "accuracy is too low for iris in cross_val_predict!"
Developer: keithing | Project: muffnn | Lines of code: 13 | Source file: test_fm_classifier.py


Example 19: cross_val_pred_plot

def cross_val_pred_plot(model,X,y,consum_col,consum_col_pred,denorm_target,model_name=None,print_plot=False,cv=5):
    if model_name is not None and any(k in model_name.lower() for k in ('multi', 'mlp', 'preceptron')):
        warnings.filterwarnings("ignore", category=DeprecationWarning) #run this line separately
        whole_pred = cross_val_predict(model,X.values,y.values,cv=5)
    else:
        whole_pred = cross_val_predict(model,X,y,cv=5)
    whole_predictions=pd.Series(whole_pred.ravel(),index=y.index)
    whole_predictions = whole_predictions.rename(consum_col_pred)
    whole = pd.DataFrame(whole_predictions).join(y)
    whole[whole[consum_col_pred] <0.0] = 0
    r2 = metrics.r2_score(y,whole_pred)
    if print_plot:
        if model_name is not None and any(k in model_name.lower() for k in ('multi', 'mlp', 'preceptron')):
            whole.plot(title=model_name+'-Whole dataset predictions - score {}'.format(r2))
        else:
            if model_name is None:
                model_name = 'Model'
                print("\nInsert model name\n")
            whole.plot(title=model_name+'-Whole dataset predictions - score {}'.format(r2))
        plt.ylabel('Power consumption in Watts')
#        plt.xlabel('Date Time')
    #    print"\nR2 score: ",metrics.r2_score(y,whole_pred),"\n"
    
    if (model_name == 'svr') or (model_name == 'mlp'):
        denorm_whole = whole*(denorm_target.max().values[0]-denorm_target.min().values[0])+denorm_target.min().values[0]
        mae = metrics.mean_absolute_error(denorm_whole[consum_col],denorm_whole[consum_col_pred])
        mse = metrics.mean_squared_error(denorm_whole[consum_col],denorm_whole[consum_col_pred])
        whole = denorm_whole
#        if 'mlp' in model_name:
#            print'calculating metrics of MLP'
#            acc = model.score(X.values,y.values)
#        else:
#            print'calculating metrics of SVR'
#            acc = model.score(X,y)
    else:
        print('calculating metrics of LNR or RDF')
        mae = metrics.mean_absolute_error(y,whole_pred)
        mse = metrics.mean_squared_error(y,whole_pred)
#        acc = model.score(X,y)
    return whole,r2,mae,mse
Developer: SOLIMAN68 | Project: Data-driven_Building_simulation_Polimi_EETBS | Lines of code: 39 | Source file: master_1_4_eachBuilding_allModels.py


Example 20: test_cross_val_predict_class_subset

def test_cross_val_predict_class_subset():

    X = np.arange(8).reshape(4, 2)
    y = np.array([0, 0, 1, 2])
    classes = 3

    kfold3 = KFold(n_splits=3)
    kfold4 = KFold(n_splits=4)

    le = LabelEncoder()

    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        est = LogisticRegression()

        # Test with n_splits=3
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold3)

        # Runs a naive loop (should be same as cross_val_predict):
        expected_predictions = get_expected_predictions(X, y, kfold3, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)

        # Test with n_splits=4
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold4)
        expected_predictions = get_expected_predictions(X, y, kfold4, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)

        # Testing unordered labels
        y = [1, 1, -4, 6]
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold3)
        y = le.fit_transform(y)
        expected_predictions = get_expected_predictions(X, y, kfold3, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
Developer: RomainBrault | Project: scikit-learn | Lines of code: 39 | Source file: test_validation.py



Note: The sklearn.model_selection.cross_val_predict examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.

