Python cross_validation.cross_val_predict function code examples


This article collects typical usage examples of the Python function sklearn.cross_validation.cross_val_predict. If you are wondering how cross_val_predict is used in practice, how to call it, or what real-world examples look like, the hand-picked code samples below should help.



The following presents 20 code examples of the cross_val_predict function, sorted by popularity by default.
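Before the individual examples, here is a minimal, self-contained sketch of the typical cross_val_predict workflow: the function returns an out-of-fold prediction for every sample, and those predictions can then be scored as if they came from a held-out test set. Note that sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed in 0.20; current releases expose the same function as sklearn.model_selection.cross_val_predict, which is what the sketch imports. The iris dataset and LogisticRegression below are illustrative choices only, not taken from any example on this page.

# Minimal usage sketch (assumes scikit-learn >= 0.18).
# On older versions the import would be:
#     from sklearn.cross_validation import cross_val_predict
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import cross_val_predict

X, y = load_iris(return_X_y=True)        # illustrative dataset
clf = LogisticRegression(max_iter=200)   # illustrative estimator

# Each sample is predicted by a model trained on the folds it was NOT part of.
y_pred = cross_val_predict(clf, X, y, cv=5)

print("Out-of-fold accuracy:", accuracy_score(y, y_pred))
print("Confusion matrix:\n", confusion_matrix(y, y_pred))

The examples that follow keep their original sklearn.cross_validation imports; adjust the import line to match your scikit-learn version.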

Example 1: perform_classifier_cross_validation

def perform_classifier_cross_validation(classifier, dtm_train,targets_train,
                                                 dtm_test, targets_test):
    cv = 3
    k_fold = KFold(len(targets_train), n_folds=cv,shuffle=True, 
                                        random_state=42)
    scoring = 'f1_macro'
    scores = cross_validation.cross_val_score(classifier, dtm_train, 
                                    targets_train,cv=k_fold, 
                                    scoring=scoring)
    
    print("Same classifier with cross validation:")
    print("Scores for folds" +"("+str(cv)+"):"+ str(scores))
    print(scoring + ": %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    
    targets_train_predicted = cross_validation.cross_val_predict(classifier, 
                                            dtm_train,targets_train, cv=cv)
    
    print_classifier_metrics(targets_train,targets_train_predicted, 
                               "train-with-cv")

    targets_test_predicted = cross_validation.cross_val_predict(classifier, 
                                                    dtm_test,targets_test,cv=cv)
    
    print_classifier_metrics(targets_test, targets_test_predicted, 
                               "test-with-cv")
    
    return classifier
Author: davcem | Project: stackexchange_text_classification | Lines: 27 | Source: classifier_perform.py


Example 2: apply_cross_validated_learning

def apply_cross_validated_learning(datasetname, X, y, resultsfolder, nfolds=5):

    dataspacename = datasetname + "_nfolds-" + str(nfolds)
    experimentrootpath = IOtools.ensure_dir(os.path.join(resultsfolder, dataspacename))
    scorefilepath = os.path.join(experimentrootpath, metaexperimentation.scorefilename+".csv")
    metaexperimentation.initialize_score_file(scorefilepath)
    
    # SVM
    kernels = ["linear", "rbf", "sigmoid", "poly"]
    Cs = [1, 10, 100, 1000]
    
    for kernel in kernels:
        for c in Cs:
            
            alg = "SVM"
            modelname = "_m-" + alg + "_k-" + kernel + "_C-" + str(c)
            experimentname = "nfolds-" + str(nfolds) + modelname
            
            clf = svm.SVC(kernel=kernel, C=c)
            ypredicted = cross_validation.cross_val_predict(clf, X, y, cv=nfolds)
            #print metrics.accuracy_score(y, ypredicted)
            reportresults(y, ypredicted, experimentname, experimentrootpath, scorefilepath)
    
    
    # Naive Bayes
    NBmodels = [naive_bayes.MultinomialNB(), naive_bayes.GaussianNB()]
    for nbmodel in NBmodels:
        alg = "NB"
        modelname = "_m-" + nbmodel.__class__.__name__
        experimentname = "nfolds-" + str(nfolds) + modelname
        
        ypredicted = cross_validation.cross_val_predict(nbmodel, X, y, cv=nfolds)
        reportresults(y, ypredicted, experimentname, experimentrootpath, scorefilepath)
Author: dicleoztur | Project: language_identification | Lines: 33 | Source: classification.py


Example 3: main

def main():
    
    parser = argparse.ArgumentParser(description='Train an ML model')
    required = parser.add_argument_group('required options')

    required.add_argument('-x', '--trainfile', required=True, help='File containing training data')
    required.add_argument('-y', '--targetfile', required=True, help='File containing target data')
    #required.add_argument('-o', '--modelfile', required=True, help='Output filename for trained model object')
    #required.add_argument('-t', '--targettype', default=int)
    
    args = parser.parse_args()

    #X = np.loadtxt(args.trainfile, skiprows=1)
    X = np.loadtxt(args.trainfile)
    #Y = np.loadtxt(args.targetfile, dtype=args.targettype)
    #Y = np.loadtxt(args.targetfile)   
    Y = np.genfromtxt(args.targetfile,dtype='str')

    assert len(X) == len(Y), "length mismatch between train and target data"

    clf1 = linear_model.LogisticRegression(penalty='l2',C=1e5,solver='newton-cg',tol=0.00001)
    clf1.fit(X, Y)
    predicted1=cross_validation.cross_val_predict(clf1,X,Y,cv=2)
    print("Prediction accuracy of logistic regression : ", metrics.accuracy_score(Y, predicted1))
    #predicted=cross_validation.cross_val_predict(clf1,x,x_tr,cv=2)
    
    clf2 = svm.SVC(C=1e5,kernel='rbf')
    clf2.fit(X, Y)
    predicted2=cross_validation.cross_val_predict(clf2,X,Y,cv=2)
    print("Prediction accuracy of SVM : ", metrics.accuracy_score(Y, predicted2))

    clf3 = naive_bayes.BernoulliNB(alpha=1.9)
    clf3.fit(X, Y)
    predicted3=cross_validation.cross_val_predict(clf3,X,Y,cv=2)
    print("Prediction accuracy of naive bayes : ", metrics.accuracy_score(Y, predicted3))

    clf4 = tree.DecisionTreeClassifier(criterion='entropy')
    clf4.fit(X, Y)
    predicted4=cross_validation.cross_val_predict(clf4,X,Y,cv=2)
    print("Prediction accuracy of decision trees : ", metrics.accuracy_score(Y, predicted4))
        
    #with open(args.modelfile, "wb") as outfile:
    #    pickle.dump(clf1, outfile, pickle.HIGHEST_PROTOCOL)
    
    with open('bin_file_lr',"wb") as outfile1:
         pickle.dump(clf1, outfile1, pickle.HIGHEST_PROTOCOL)

    with open('bin_file_svm',"wb") as outfile2:
         pickle.dump(clf2, outfile2, pickle.HIGHEST_PROTOCOL)

    with open('bin_file_bayes',"wb") as outfile3:
         pickle.dump(clf3, outfile3, pickle.HIGHEST_PROTOCOL)

    with open('bin_file_dtree',"wb") as outfile4:
         pickle.dump(clf4, outfile4, pickle.HIGHEST_PROTOCOL)
Author: biplabks | Project: MLTUNE | Lines: 55 | Source: train_ml.py


Example 4: test_cross_val_predict_sparse_prediction

def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(
        n_classes=2, n_labels=1, allow_unlabeled=False, return_indicator=True, random_state=1
    )
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel="linear"))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
Author: mannby | Project: scikit-learn | Lines: 12 | Source: test_cross_validation.py


Example 5: test_cross_val_predict_pandas

def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
Author: AppliedArtificialIntelligence | Project: scikit-learn | Lines: 15 | Source: test_cross_validation.py


Example 6: predict_evaluate_models

def predict_evaluate_models(fn ,ax=None, sel=["Penalties_Conceeded","Tries_Scored"], goal="Referee", verbosity=0):
    class_weight = 'auto'
    X, y, names = data_prepare(fn, sel=sel, goal=goal, verbosity=verbosity-1)
    if verbosity > 2:
        y_shuffled = y.copy()
        np.random.shuffle(y_shuffled)
        print ("All zeros accuracy:",1.0-np.sum(y)/len(y)) 
        print ("y_shuffled f1_csore:",metrics.f1_score(y, y_shuffled))

    n_folds = 10
    cv = cross_validation.StratifiedKFold(y, n_folds=n_folds)
    #cv = cross_validation.LeaveOneOut(n=len(y))
    results = []
    for sclf in ('svm','svmp','svmr','lgCV','gnb','rf','knc'):
        clf = get_clf(sclf,class_weight=class_weight)
        y_pred = cross_validation.cross_val_predict(clf, X, y, cv=cv)
        #print "pred:",y_pred
        res = [
            metrics.accuracy_score(y, y_pred),
            metrics.precision_score(y, y_pred),
            metrics.recall_score(y, y_pred),
            metrics.f1_score(y, y_pred),
            ]
        if verbosity > 0:
            print (sclf,res) 
        results.append( (sclf,res) )

    return results
Author: sbsar6 | Project: python-data-mining | Lines: 28 | Source: predictive_analysisSCRef.py


Example 7: training

def training(features, targets, feature_description,
             validation_features, model_flag):
    """
    Train the data with XGBoost model and 10-cross fold validation
    method. Output the result in confusion matrix.
    :param model_flag:
    :param validation_features:
    :param features: X, 2-D matrix
    :param targets: Y 1-D target array
    :param feature_description: brief description of the feature
    """
    model_name = model_name_dict[model_flag]
    model = model_dict[model_flag]
    model.fit(features, targets)
    prediction = model.predict(validation_features)

    file_names = np.load('ZL_validation_file_names.npy')
    validation_result = open('validation_result_' + model_name +
                             feature_description, 'w')

    # output validation result with specified format.
    p = re.compile('(validation\.[0-9]+)')
    for i in range(len(prediction)):
        # format: validation_xxxxx type
        print >> validation_result, \
            p.findall(file_names[i])[0].replace('.', '_'), \
            type_array[int(prediction[i])]
    validation_result.close()

    prediction = cross_validation.cross_val_predict(
            model, features, targets, cv=10)

    cm = confusion_matrix(targets, prediction)
    output_confusion_matrix_tex(
            cm, model_name + '_' + feature_description)
Author: versemonger | Project: MusicClassification | Lines: 35 | Source: music_classifier.py


Example 8: kfCrossVal

def kfCrossVal(loansData):
    
    # Import required libraries
    from sklearn.cross_validation import cross_val_predict
    from sklearn import linear_model
    import sklearn.metrics as met
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import PolynomialFeatures

    # Create linear regression model using FICO score as the only predictor
    # Interest Rate is the dependent variable
    lr = linear_model.LinearRegression()
    y = loansData.as_matrix(columns=['Interest.Rate'])
    x = loansData[['Loan.Length', 'FICO.Score']].as_matrix()

    # Run the kfold cross validation and store the results as an array
    predicted = cross_val_predict(lr, x, y, cv=10)

    # Try and run as quadratic?
    # POLY2 = smf.ols(formula = 'Y ~ 1 + X + I(X**2)', data=TRAIN_DF).fit()

    # Calculate MAE, MSE, and R2
    print("Mean Absolute Error: {}".format(met.mean_absolute_error(y, predicted)))
    print("Mean Squared Error: {}".format(met.mean_squared_error(y, predicted)))
    print("R Squared: {}".format(met.r2_score(y, predicted)))

    # Plot the actual versus predicted values
    fix, ax = plt.subplots()
    ax.scatter(y, predicted)
    ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    plt.show()
Author: yorktronic | Project: data_science | Lines: 33 | Source: cross_validation.py


Example 9: main

def main():
    dataset = samples.get_dataset()

    X, y, page_labels = build_Xy_from_pages_dataset(dataset)
    clf = create_classifier()

    # this gives the prediction result for every element
    # when it was in the test dataset during cross validation
    cv_iter = cross_validation.LabelKFold(page_labels, n_folds=10)
    predicted = cross_validation.cross_val_predict(clf, X, y, cv=cv_iter)

    cm = metrics.confusion_matrix(y, predicted)
    print('\nConfusion matrix:')
    print(cm, '\n\n')
    print(metrics.classification_report(y, predicted))

    print('Training and peeking at the word weights...')
    X_train, y_train = X[:-20], y[:-20]
    clf = get_trained_classifier(X_train, y_train)
    cv = clf.steps[-2][1]
    svc = clf.steps[-1][1]
    word_weights = zip(svc.coef_[0], cv.vocabulary_)

    print('Top 10 weights for negative cases')
    for weight, word in sorted(word_weights)[:10]:
        print('%0.5f  %s' % (weight, word))

    print('\nTop 10 weights for positive cases')
    for weight, word in sorted(word_weights)[-10:][::-1]:
        print('%0.5f  %s' % (weight, word))

    import pickle
    with open('classifier.pickle', 'w') as f:
        pickle.dump(clf, f)
Author: eliasdorneles | Project: author_finder | Lines: 34 | Source: classify.py


Example 10: main

def main():

    dataTuples=getDataInFormat()
    print "Length of dataTuples is: ",  len(dataTuples)
    shuffle(dataTuples)
    trainTuples=dataTuples
    del dataTuples
    ids, labels, vectors= getLabelsAndVectors(trainTuples)
    del trainTuples
    followerCountsList = loadFollowerCountsFromFile()
    space=getSpace(vectors)
    reducedSpace=getReducedSpace(vectors, space)
    spaceWithMetaFeatures= augmentSpace(reducedSpace, emotionFeatures)

    print "Total # of features in your space is: ", len(space)
    print "Total # of features in your reducedSpace is: ", len(reducedSpace)
    oneHotVectors=getOneHotVectors(ids, labels, vectors,spaceWithMetaFeatures , followerCountsList)
    trainVectors, trainLabels=getOneHotVectorsAndLabels(oneHotVectors)
    del oneHotVectors
    clf = OneVsRestClassifier(SVC(C=1, kernel = 'linear',gamma=0.1, verbose= False, probability=False))
    clf.fit(trainVectors, trainLabels)
    
    print "\nDone fitting classifier on training data...\n"
    print "\nDone fitting classifier on training data...\n"
    print "="*50, "\n"
    print "Results with 10-fold cross validation:\n"
    print "="*50, "\n"
    predicted = cross_validation.cross_val_predict(clf, trainVectors, trainLabels, cv=10)
    print "*"*20
    print "\t accuracy_score\t", metrics.accuracy_score(trainLabels, predicted)
    print "*"*20
    print "precision_score\t", metrics.precision_score(trainLabels, predicted)
    print "recall_score\t", metrics.recall_score(trainLabels, predicted)
    print "\nclassification_report:\n\n", metrics.classification_report(trainLabels, predicted)
    print "\nconfusion_matrix:\n\n", metrics.confusion_matrix(trainLabels, predicted)
Author: pratiksanghvi | Project: PersonalityClassifier | Lines: 35 | Source: OneVsRest_metafeature_CV10.py


Example 11: transform

    def transform(self, X):
        # Purpose of skip is to skip the estimator
        if self.skip:
            return X

        # Is the data being transformed the same as the training data
        is_train_data = False
        if isinstance(X, pd.DataFrame) and self.hashed_value == hash(X.values.data.tobytes()):
            is_train_data = True
        if isinstance(X, np.ndarray) and self.hashed_value == hash(X.data.tobytes()):
            is_train_data = True

        # If the dataset is the training data, use CV predictions
        if is_train_data:
            feature = cross_val_predict(clone(self.model), X, self.y)#, cv=self.train_cv)

        # Otherwise, use the model to predict
        else:
            feature = self.model.predict(X)

        # Add feature to dataset
        if isinstance(X, pd.DataFrame):
            X[self.feature_name] = feature
        if isinstance(X, np.ndarray):
            X = np.c_[X, feature]
        return X
Author: ChiuYeeLau | Project: KaggleSFCrimePrediction | Lines: 26 | Source: Ensemble1.py


Example 12: classify_cv

def classify_cv(data, cats, k):
    clf = svm.SVC(gamma=0.001, C=100.)
    vect = TfidfVectorizer(analyzer = 'word', stop_words = stopwords)
    tfidf_matrix = vect.fit_transform(data)
    predicted = cross_validation.cross_val_predict(clf, tfidf_matrix, cats, cv=k)
    conf_matrix = metrics.confusion_matrix(cats, predicted)
    print (metrics.classification_report(cats, predicted))
Author: hugohrosa | Project: express | Lines: 7 | Source: learn.py


Example 13: checkSkflowAccuracy

def checkSkflowAccuracy(dataset,target):
    # baseline: 0.6923 with max_feat=0.5
    classifier = RandomForestClassifier(max_depth=8, n_estimators=500, n_jobs=8, random_state=1, max_features=0.9)
    predicted = cross_validation.cross_val_predict(classifier,dataset,target,cv=5)
    score = metrics.accuracy_score(target,predicted)
    print("Accuracy: " + str(score))
    print(metrics.confusion_matrix(target,predicted,labels=[0,1,2,3,4,5]))
Author: Waffleboy | Project: DMC2016 | Lines: 7 | Source: DMC2016.py


Example 14: run

def run(params):
    train = loadDataFrame(params,'train')
    if params['test']:
        test = loadDataFrame(params,'test')
        
    train = runPreprocess(train,params)
    clf = getSpecifiedClf(params)
    try:
        dataset,target = splitDatasetTarget(train,params['target'])
    except:
        raise Exception('Target not specified')
    try:
        cross_val = params['cross_validate']
    except:
        cross_val = False
        
    clfName = getNameFromModel(clf)
    if cross_val and clfName != 'XGBClassifier':
        print('Beginning cross validation')
        predicted = cross_validation.cross_val_predict(clf,dataset,target,cv=5,n_jobs=-1)
        accuracyChecker(target,predicted)
        return
        
    if clfName == 'XGBClassifier':
        print('Xgboost CV selected. Beginning to find optimal rounds')
        clf = xgboostCV(clf,dataset,target)
        print('Xgboost Accuracy on 80-20 split (for speed)')
            
    trainX,testX,trainY,testY = splitTrainTest(dataset,target)
    clf.fit(trainX,trainY)
    predicted = clf.predict(testX)
    accuracyChecker(testY,predicted)

        
Author: Waffleboy | Project: Predict-Me-Now | Lines: 32 | Source: backend.py


Example 15: get_testing_metrics

def get_testing_metrics(model, X, y, metrics, as_indexes, n_folds, X_test=None):
    y_pred = cross_val_predict(
        model,
        X,
        y,
        cv=StratifiedKFold(
            y,
            n_folds=n_folds,
            shuffle=True,
            random_state=RANDOM_STATE
        )
    )
    print "y_pred", y_pred
    model.fit(X, y)
    result = get_y_true_y_pred_based_metrics(y, y_pred, metrics)
    if FEATURES in metrics:
        result[FEATURES] = model.get_support(indices=True)
    if OBJECTS in metrics:
        if as_indexes:
            result[OBJECTS] = [get_data_keeper().get_object_name_by_index(index) for (index,) in X]
        else:
            result[OBJECTS] = list(X.index)
    if TEST_PREDICTIONS in metrics:
        result[TEST_PREDICTIONS] = X_test, model.predict(X_test)
    return result
Author: satanevsky | Project: diploma | Lines: 25 | Source: testing.py


Example 16: cross_validate

 def cross_validate(self):
     progress_logger.info("Starting cross validation.")
     validate_clf = linear_model.LogisticRegression(class_weight=self.weights)
     predictions = cross_validation.cross_val_predict(validate_clf, self.X, self.Y.ravel(), cv=5)
     fp_count = 0.0
     tp_count = 0.0
     fn_count = 0.0
     tn_count = 0.0
     miscount = 0.0
     for i in range(len(predictions)):
         prediction = predictions[i]
         expected = self.Y[i][0]
         if prediction == 1 and expected == 1:
             tp_count += 1
         elif prediction == 1 and expected == 0:
             fp_count += 1
         elif prediction == 0 and expected == 1:
             fn_count += 1
         elif prediction == 0 and expected == 0:
             tn_count += 1
         else:
             miscount += 1
     if miscount > 0:
         debug_logger.warn("During cross validation, found {} miscounts.".format(miscount))
     total_count = fp_count + tp_count + fn_count + tn_count
     self.validation_accuracy = (tp_count + tn_count) / total_count if total_count != 0 else 0.0
     fp_rate = fp_count / (fp_count + tn_count) if fp_count + tn_count != 0 else 0.0
     fn_rate = fn_count / (fn_count + tp_count) if fn_count + tp_count != 0 else 0.0
     progress_logger.info("Confusion matrix - True positives: {}, False positives: {}, False negatives: {}, True negatives: {}".format(
         tp_count, fp_count, fn_count, tn_count))
     progress_logger.info("Validation Accuracy: {}".format(self.validation_accuracy))
     progress_logger.info("False positive rate: {}".format(fp_rate))
     progress_logger.info("False negative rate: {}".format(fn_rate))
Author: mikeaboody | Project: phishing-research | Lines: 33 | Source: classify.py


Example 17: crossvalidation

def crossvalidation(x, y):
    """
    Cross validation metric. Also plot confusion matrix and save cls if flags are set to 1.
    :param x: features (valence, arousal)
    :param y: target (emotion)
    :return:
    """
    c_array = np.logspace(0, 3, 4)
    gamma_array = np.logspace(-3, 3, 7)

    # feature scaling
    if feature_scaling:
        std_scale = preprocessing.StandardScaler().fit(x)
        x = std_scale.transform(x)

    for c in c_array:
        for gamma in gamma_array:
            clf = svm.SVC(kernel='linear', C=c, gamma=gamma) #kernel= rbf #kernel= poly #kernel= linear
            scores = cross_validation.cross_val_score(clf, x, y, cv=3)
            print("Accuracy: %0.2f (+/- %0.2f) %f %f" % (scores.mean(), scores.std() * 2, c, gamma))
            pred = cross_validation.cross_val_predict(clf, x, y, cv=3)
            print("Classes accuracy: ", classes_accuracy(y, pred))

    print(np.array(y))
    print(pred)

    #plot last one, not best, CARE!!!
    if plot_confusion_matrix:
        confusion_matrix.prepare_plot(y, pred)

    if save_clf:
        clf.fit(x, y)
        joblib.dump(clf, 'classifiers\\'+configuration.get('clf_name')+'.pkl')
Author: Matlino | Project: emotionDetection | Lines: 33 | Source: subjective_to_emotion_svm.py


Example 18: main

def main():
    pickle_folder = '../pickles_no_rms'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)
    pickle_folders_to_load = [p for p in pickle_folders_to_load if 'drums1__' not in p]

    sdr_type = 'background'

    fits = []
    sdrs = []
    for pick in pickle_folders_to_load:
        beat_spec_name = join(pickle_folder, pick, pick + '__beat_spec.pick')
        beat_spec = pickle.load(open(beat_spec_name, 'rb'))

        entropy, log_mean = beat_spectrum_prediction_statistics(beat_spec)
        fit_X = [entropy, log_mean]
        fits.append(fit_X)

        sdrs_name = join(pickle_folder, pick, pick + '__sdrs.pick')
        sdr_vals = pickle.load(open(sdrs_name, 'rb'))
        cur_sdr = sdr_vals[sdr_type][0]
        sdrs.append(cur_sdr)

    fits = np.array(fits)
    sdrs = np.array(sdrs).reshape(-1, 1)
    knn = neighbors.KNeighborsRegressor(5, weights='distance')
    scores = cross_validation.cross_val_predict(knn, fits, sdrs, cv=10, verbose=1)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean() - sdrs.mean(), scores.std() * 2))
Author: ethman | Project: prediction | Lines: 28 | Source: prediction2.py


Example 19: eval_log_reg

def eval_log_reg(the_training_data, the_truth): 
    K_FOLD = 10
    
    # Linear regression
    lr = linear_model.LogisticRegression()

    # Evaluate
    scores = cross_validation.cross_val_score(lr, the_training_data, the_truth, cv=K_FOLD)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    
    predicted = cross_validation.cross_val_predict(lr, the_training_data, the_truth, cv=K_FOLD)
    print "Confusion matrix:"
    print metrics.confusion_matrix(the_truth, predicted)
    
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        the_training_data, the_truth, test_size=1.0/K_FOLD, random_state=0)
    lr.fit(X_train, y_train)
    labels = X_train.columns
    coefficients = [(labels[i],val) for i,val in enumerate(lr.__dict__['coef_'][0])]
    coefficients.sort(key=lambda x: abs(x[1]), reverse=True)
    print "Most predictive features:"
    for i in range(0,5):
        print "    %s: %0.2f" % (coefficients[i][0], coefficients[i][1])
    
    numExamples = np.shape(X_train)[0]
    print "Training examples: %d" % numExamples
    usedUtterances = [example.split(".csv_")[0] for example in X_train.index]
    numUtterances = len(set(usedUtterances))
    print  "Training utterances: %d" % numUtterances
    
    return [scores.mean(), scores.std() * 2, len(coefficients), numExamples, numUtterances]
Author: akuefler | Project: ASLrecog | Lines: 31 | Source: build_and_eval_human_theory_models.py


Example 20: run_svm

def run_svm(x, y):
    s = svm.SVR()
    scores = cross_validation.cross_val_score(s, x, y, cv=10)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    predictions = cross_validation.cross_val_predict(s, x, y, cv=10)

    return predictions
Author: ethman | Project: prediction | Lines: 7 | Source: duet_classifier.py



Note: The sklearn.cross_validation.cross_val_predict examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to the corresponding project's license before distributing or reusing the code; do not reproduce without permission.

