Python metrics.auc_score Function Code Examples


This article collects typical usage examples of the sklearn.metrics.auc_score function in Python. If you are looking for concrete answers to questions such as how to call auc_score, what arguments it takes, or what real-world uses look like, the curated code examples below should help.



The sections below present 20 code examples of the auc_score function, ordered by popularity by default.
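Before the examples, here is a minimal usage sketch of the basic call pattern. The variable names y_true and y_scores are illustrative assumptions rather than taken from any example below. auc_score takes binary ground-truth labels and continuous scores and returns the area under the ROC curve; the function was removed from later scikit-learn releases, where the equivalent call is sklearn.metrics.roc_auc_score.

# Minimal usage sketch (assumes an older scikit-learn that still ships auc_score;
# on current releases fall back to the equivalent roc_auc_score).
import numpy as np

try:
    from sklearn.metrics import auc_score
except ImportError:
    from sklearn.metrics import roc_auc_score as auc_score

y_true = np.array([0, 0, 1, 1])              # binary ground-truth labels
y_scores = np.array([0.1, 0.4, 0.35, 0.8])   # predicted probabilities or decision scores

print(auc_score(y_true, y_scores))           # area under the ROC curve -> 0.75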

Example 1: test_prf

def test_prf(fn1,fn2,sth,L):
    y_true=[]
    y_score=[]
    edges_1=prep.read_edges(fn1)
    edges_2=prep.read_edges(fn2)
    
    predict_set={}
    for key in sth.keys():
        predict_set[key]=predict_set.get(key,0.)+sth[key]
    predict_set=sorted(predict_set.iteritems(),key=lambda d:d[1],reverse=True)#

    threshold=predict_set[L][1]
    for i in edges_1:
        if sth[i]>threshold:
            y_score.append(1)
        else:
            y_score.append(0)

    for i in edges_1:
        if i not in edges_2:
            y_true.append(0)
        else:
            y_true.append(1)

    print classification_report(y_true,y_score)
    print auc_score(y_true,y_score)
Developer: noosc, Project: exp-code, Lines: 26, Source file: statistic.py


Example 2: eval_model

def eval_model():
    comments, labels = load_extended_data()

    clf1 = build_base_model()
    clf2 = build_elasticnet_model()
    clf3 = build_stacked_model()
    clf4 = build_nltk_model()
    models = [clf1, clf2, clf3, clf4]
    #models = [clf1]
    cv = ShuffleSplit(len(comments), n_iterations=5, test_size=0.2,
            indices=True)
    scores = []
    for train, test in cv:
        probs_common = np.zeros((len(test), 2))
        for clf in models:
            X_train, y_train = comments[train], labels[train]
            X_test, y_test = comments[test], labels[test]
            clf.fit(X_train, y_train)
            probs = clf.predict_proba(X_test)
            print("score: %f" % auc_score(y_test, probs[:, 1]))
            probs_common += probs
        probs_common /= 4.
        scores.append(auc_score(y_test, probs_common[:, 1]))
        print("combined score: %f" % scores[-1])

    print(np.mean(scores), np.std(scores))
Developer: Karamcse, Project: kaggle_insults, Lines: 26, Source file: train.py


Example 3: test_thresholded_scorers

def test_thresholded_scorers():
    """Test scorers that take thresholds."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = auc_score(y_test, clf.decision_function(X_test))
    score3 = auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    logscore = SCORERS['log_loss'](clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = SCORERS['roc_auc'](clf, X_test, y_test)
    score2 = auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
Developer: Comy, Project: scikit-learn, Lines: 28, Source file: test_score_objects.py


Example 4: test_score_scale_invariance

def test_score_scale_invariance():
    # Test that average_precision_score and auc_score are invariant by
    # the scaling or shifting of probabilities
    y_true, _, probas_pred = make_prediction(binary=True)
    roc_auc = auc_score(y_true, probas_pred)
    roc_auc_scaled = auc_score(y_true, 100 * probas_pred)
    roc_auc_shifted = auc_score(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_scaled)
    assert_equal(roc_auc, roc_auc_shifted)
    pr_auc = average_precision_score(y_true, probas_pred)
    pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
    pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
    assert_equal(pr_auc, pr_auc_scaled)
    assert_equal(pr_auc, pr_auc_shifted)
Developer: conradlee, Project: scikit-learn, Lines: 14, Source file: test_metrics.py


Example 5: bagging

def bagging():
    from sklearn.feature_selection import SelectPercentile, chi2

    comments, dates, labels = load_data()
    select = SelectPercentile(score_func=chi2, percentile=4)

    clf = LogisticRegression(tol=1e-8, penalty='l2', C=7)
    #clf = BaggingClassifier(logr, n_estimators=50)
    countvect_char = TfidfVectorizer(ngram_range=(1, 5),
            analyzer="char", binary=False)
    countvect_word = TfidfVectorizer(ngram_range=(1, 3),
            analyzer="word", binary=False)
    badwords = BadWordCounter()

    ft = FeatureStacker([("badwords", badwords), ("chars", countvect_char),
        ("words", countvect_word)])
    #ft = TextFeatureTransformer()
    pipeline = Pipeline([('vect', ft), ('select', select), ('logr', clf)])

    cv = ShuffleSplit(len(comments), n_iterations=20, test_size=0.2,
            indices=True)
    scores = []
    for train, test in cv:
        X_train, y_train = comments[train], labels[train]
        X_test, y_test = comments[test], labels[test]
        pipeline.fit(X_train, y_train)
        probs = pipeline.predict_proba(X_test)
        scores.append(auc_score(y_test, probs[:, 1]))
        print("score: %f" % scores[-1])
    print(np.mean(scores), np.std(scores))
Developer: ANB2, Project: kaggle_insults, Lines: 30, Source file: old.py


Example 6: classification_metrics

def classification_metrics (targets, preds, probs=None):

#    if probs != None and len(probs) > 0:
#        fpr, tpr, thresholds = roc_curve(targets, probs[:, 1], 1)
#        roc_auc = auc_score(fpr, tpr)
#    else:
#        fpr, tpr, thresholds = roc_curve(targets, preds, 1)
#        roc_auc = auc_score(targets, preds)

    auc = 0
    if len(targets) > 1:
        auc = auc_score(targets, preds)

    cm = confusion_matrix(targets, preds)

    #accuracy
    acc = accuracy_score(targets, preds)

    #recall? True Positive Rate or Sensitivity or Recall
    sens = recall_score(targets, preds)

    #precision
    prec = precision_score(targets, preds)

    #f1-score
    f1 = f1_score(targets, preds, np.unique(targets), 1)

    tnr  = 0.0
    spec = 0.0
    #True Negative Rate or Specificity (tn / (tn+fp))
    if len(cm) == 2:
        if (cm[0,0] + cm[0,1]) != 0:
            spec = float(cm[0,0])/(cm[0,0] + cm[0,1])

    return acc, sens, spec, prec, f1, auc
Developer: alexsavio, Project: aizkolari, Lines: 35, Source file: aizkolari_classification.py


Example 7: run_cv

def run_cv(x,y,reg,cv):
     ''' returns mean AUC for this reg using cv splits.'''
     scores = []      
     for sp in cv:
          reg.fit(x[sp[0],:],y[sp[0]])
          scores.append(auc_score(y[sp[1]],reg.predict_proba(x[sp[1],:])[:,1]))
     return np.mean(scores)
Developer: funemployment, Project: kaggle-amzn, Lines: 7, Source file: log_reg.py


Example 8: model_generate_level1

def model_generate_level1(bestc, features, X_train, y) :

    ntrain = X_train.shape[0]
    newdata = np.zeros(ntrain)

    X_train, keymap = utility.OneHotEncoder(X_train[:,features])
    model = linear_model.LogisticRegression()
    model.C = bestc

    cvscores = []
    cvgen = cross_validation.KFold(ntrain, 10, random_state=utility.SEED)
    for train_inds, test_inds in cvgen :
        X_cvtrain = X_train[train_inds]
        X_cvtest = X_train[test_inds]
        y_cvtrain = y[train_inds]
        y_cvtest = y[test_inds]

        model.fit(X_cvtrain, y_cvtrain)
        pred_cvtest = model.predict_proba(X_cvtest)[:,1]
        cvscore = metrics.auc_score(y_cvtest, pred_cvtest)
        cvscores.append(cvscore)

        newdata[test_inds] = pred_cvtest

    print "Average CV Score: {}".format(np.mean(cvscores))
    return newdata
Developer: jamesjohnson92, Project: kaggle-amazonaccess, Lines: 26, Source file: logreg_generate_level1_data.py


Example 9: auc

def auc(test_data, index, reverse, test_file):
    pred = [x[index] for x in test_data]

    if reverse:
        pred = [ x * -1 for x in pred]
    testing_Y = [x[0] for x in test_data]
    print "AUC: \n%f\n" % metrics.auc_score(testing_Y, pred)    
Developer: fengqi0423, Project: hahaha, Lines: 7, Source file: evaluator.py


Example 10: evalSymbReg

def evalSymbReg(individual):
    # Transform the tree expression in a callable function
    func = toolbox.lambdify(expr=individual)
    # Evaluate the sum of squared difference between the expression
    # and the real function : x**4 + x**3 + x**2 + x
    #X=[[1.0000,0.9231,1.0000,1.0000,1.0000,1.0000,0.9091,1.0000]]
    #X.append([1.0000,0.9231,1.0000,1.0000,0.8333,0.5000,0.9091,0.8333])
    #X.append([1.0000,0.9231,1.0000,1.0000,0.8333,1.0000,0.9091,0.8333])
    #X.append([0.0000,0.9231,0.0000,0.0000,0.8333,0.5000,0.9091,0.8333])
    #X=[[1.0000,0,1.0000,1.0000,1.0000,1.0000,0,1.0000]]
    #X.append([1.0000,0,1.0000,1.0000,0,0.5000,0,0])
    #X.append([1.0000,0,1.0000,1.0000,0,1.0000,0,0])
    #X.append([1.0000,1,1.0000,1.0000,0,0.5000,0,1])
    #X.append([0.0000,1,1.0000,1.0000,0,0.5000,0,1])
    #L
    #A=[1,1,1,0,1];
    t=0
    a=0
    global co 
    co+=1
   # print co
    if(co>6000):
        global A,X
        A,X=getdata()
        co=0
    preds=[]
    for x in X:
        preds.append(func(x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7]))
        #t+=1
    auc = 1-metrics.auc_score(A, preds)
    return auc,
Developer: flash121, Project: Symbolic-Regression-Logistical, Lines: 31, Source file: demain.py


Example 11: ScoreClassifier

def ScoreClassifier(features, labels, clf=None, score_func=None):
  """Test a learned classifier.

  :type callable score_func: Scoring function (one of accuracy_scorer or
     auc_scorer). This is not a score function from sklearn.metrics.
  :rtype: float
  :returns: Accuracy of classifier on test data.

  """
  # Note: type(clf) will be 'instance' if clf is a learned classifier. If
  # instead type(clf) is 'type', then it is assumed to be the class of learning
  # algorithm to apply.
  if clf is None or type(clf) == type:
    mask = ChooseTrainingSet(labels, 0.5)
    clf = FitClassifier(features[mask], labels[mask], algorithm=clf)
    features = features[~mask]
    labels = labels[~mask]
  features = features.astype(float)
  score_func = score_func or 'accuracy'
  if isinstance(score_func, basestring):
    score_func = score_func.lower()
    if score_func == 'accuracy':
      score_func = accuracy_score
    elif score_func == 'auc':
      predictions = clf.decision_function(features)
      return auc_score(labels, predictions), predictions
  elif not callable(score_func):
    raise ValueError("Score function must be a string or a callable.")
  predictions = clf.predict(features)
  return score_func(labels, predictions), predictions
Developer: mthomure, Project: glimpse-project, Lines: 30, Source file: learn.py


Example 12: test_auc

def test_auc():
    probs = numpy.ravel([test_pred_proba_i(i)[:,1] for i in xrange(n_test_batches)])
    if numpy.all(test_set_y.get_value()) and numpy.all(probs):
        return 1.
    if numpy.all(test_set_y.get_value() == 0) and numpy.all(probs == 0):
        return 0.
    return auc_score(test_set_y.get_value()[:n_test_batches*batch_size], probs)
Developer: bjcohen, Project: kaggle, Lines: 7, Source file: DBN.py


Example 13: run_fest_test

def run_fest_test(festpath="/Users/bjcohen/dev/fest", **kwargs):
    """
    -c <int>  : committee type:
                1 bagging
                2 boosting (default)
                3 random forest
    -d <int>  : maximum depth of the trees (default: 1000)
    -e        : report out of bag estimates (default: no)
    -n <float>: relative weight for the negative class (default: 1)
    -p <float>: parameter for random forests: (default: 1)
                (ratio of features considered over sqrt(features))
    -t <int>  : number of trees (default: 100)
    """
    idstr = "".join(map(lambda (f, v): f + str(v), kwargs.items()))
    ret = call(
        [
            os.path.join(festpath, "festlearn"),
            " ".join(map(lambda (f, v): "-" + f + str(v), kwargs.items())),
            os.path.join("..", "data", "train_3way_-27000.libsvm"),
            os.path.join("..", "data", "fest_%s_-27000.model" % idstr),
        ]
    )
    if ret != 0:
        raise Exception()
    ret = call(
        [
            os.path.join(festpath, "festclassify"),
            os.path.join("..", "data", "train_3way_-27000.libsvm"),
            os.path.join("..", "data", "fest_%s_-27000.model" % idstr),
            os.path.join("..", "data", "pred_fest_train_-27000_%s" % idstr),
        ]
    )
    if ret != 0:
        raise Exception()
    ret = call(
        [
            os.path.join(festpath, "festclassify"),
            os.path.join("..", "data", "train_3way_27000-.libsvm"),
            os.path.join("..", "data", "fest_%s_-27000.model" % idstr),
            os.path.join("..", "data", "pred_fest_train_27000-_%s" % idstr),
        ]
    )
    if ret != 0:
        raise Exception()
    tr_score = auc_score(ACTION[:27000], pd.read_table("../data/pred_fest_train_-27000_%s" % idstr, header=None))
    te_score = auc_score(ACTION[27000:], pd.read_table("../data/pred_fest_train_27000-_%s" % idstr, header=None))
    return (tr_score, te_score)
Developer: pdikang, Project: kaggle, Lines: 47, Source file: eda.py


Example 14: calculatePrediction

def calculatePrediction():
    dTr = loadFile('../data/train.csv')
    y_train = dTr[0]
    X_train_A = dTr[1]
    X_train_B = dTr[2]

    dTes = loadFileTest('../data/test.csv')
    X_test_A = dTes[0]
    X_test_B = dTes[1]

    print "train size: {0} {1}".format(X_train_A.shape, X_train_B.shape)
    print "test size: {0} {1}".format(X_test_A.shape, X_test_B.shape)

    #def transform_features(x):
    #    return np.log(1+x)

    X_train_minus = transform_features(X_train_A) - transform_features(X_train_B)
    X_train_div = transform_features(X_train_A) / (transform_features(X_train_B) + 1)
    X_train = np.concatenate((X_train_div, X_train_minus), axis=1)

    X_test_minus = transform_features(X_test_A) - transform_features(X_test_B)
    X_test_div = transform_features(X_test_A) / (transform_features(X_test_B) + 1)
    X_test = np.concatenate((X_test_div, X_test_minus), axis=1)

    # In this case we'll use a random forest, but this could be any classifier
    cfr = RandomForestClassifier(n_estimators=100, max_features=math.sqrt(X_train.shape[1]), n_jobs=1)

    # Simple K-Fold cross validation, 10 folds.
    cv = cross_validation.KFold(len(X_train), k=10, indices=False)

    # Iterate through the training and test cross-validation segments and
    # run the classifier on each one, aggregating the results into a list.
    results = []
    for traincv, testcv in cv:
        probas = cfr.fit(X_train[traincv], y_train[traincv]).predict_proba(X_train[testcv])
        p_train = [x[1] for x in probas]
        results.append(auc_score(y_train[testcv].tolist(), p_train))
        #results.append( logloss.llfun(target[testcv], [x[1] for x in probas]) )

    # Print out the mean of the cross-validated results.
    print "Results: " + str(np.array(results).mean())

    # Test set probabilities
    probas = cfr.predict_proba(X_test)
    p_test = [x[1] for x in probas]

    ###########################
    # WRITING SUBMISSION FILE
    ###########################
    predfile = open('predictions_test.csv', 'w+')

    print "label size: test - {0} expected {1}".format(len(p_test), X_test_A.shape[0])

    for item in p_test:
        print >>predfile, "{0}".format(str(item))

    predfile.close()
Developer: kebos, Project: ClassyFires, Lines: 59, Source file: prediction_randomforest_DA.py


Example 15: AUROCScore

	def AUROCScore(self):
		try:
			self.__rocarea = auc_score(self.__labels, self.__scores)
		except Exception as e:
			print "roc_curve exception"
			print e
			return nan
		return self.__rocarea
Developer: wimverleyen, Project: AggregateGeneFunctionPrediction, Lines: 8, Source file: performance.py


Example 16: iterate_Multinomial_alpha

def iterate_Multinomial_alpha(vect):
	auc_training=[]
	auc_oos=[]
	dfs=np.arange(0,3,0.1)
	print dfs
	for n in dfs:
		print n
		train2=vect.fit_transform(train.Comment)
		x_train2,x_test2=train_test_split(train2,random_state=42)
		x_train2=x_train2.tocoo() 
		x_test2=x_test2.tocoo()
		classifier = MultinomialNB(fit_prior=True, alpha=n).fit(x_train2,x_train[:,0]) 
		auc_training.append(auc_score(x_train[:,0],classifier.predict(x_train2)))
		auc_oos.append(auc_score(x_test[:,0],classifier.predict(x_test2)))
	results= zip(dfs,auc_training,auc_oos)
	print results
	return auc_plot(results)
Developer: manugarri, Project: Data_Science, Lines: 17, Source file: MNB_script.py


Example 17: test_roc_curve

def test_roc_curve():
    """Test Area under Receiver Operating Characteristic (ROC) curve"""
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.80, decimal=2)
    assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))
Developer: conradlee, Project: scikit-learn, Lines: 8, Source file: test_metrics.py


Example 18: summary

def summary(clf, x, y):
    df = clf.decision_function(x).ravel()
    yp = df > 0

    print 'False Positive: %0.3f' % false_pos(y, yp)
    print 'Recall:         %0.3f' % recall(y, yp)
    print 'AUC:            %0.3f' % auc_score(y, yp)
    print 'Accuracy:       %0.3f' % (yp == y).mean()
Developer: ChrisBeaumont, Project: brut, Lines: 8, Source file: util.py


Example 19: validation_worker

def validation_worker(args):
    X, y, model, j, SEED = args
    X_train, X_cv, y_train, y_cv = cross_validation.train_test_split(
                                       X, y, test_size=.15, 
                                       random_state = j*SEED)
    model.fit(X_train, y_train)
    preds = model.predict_proba(X_cv)[:,1]
    auc = metrics.auc_score(y_cv, preds)
    return auc  
Developer: adepalatis, Project: 379K_Final_Project, Lines: 9, Source file: logistic_regression_updated_LM.py


Example 20: cv_loop

def cv_loop(X, y, model, N, seed):
    mean_auc = 0.0
    k_fold = KFold(len(y), N, indices=True, shuffle=True, random_state=seed)
    for train_ix, test_ix in k_fold:
        model.fit(X[train_ix], y[train_ix])
        preds = model.predict_proba(X[test_ix])[:, 1]
        auc = metrics.auc_score(y[test_ix], preds)
        # print("AUC (fold %d/%d): %f" % (i + 1, N, auc))
        mean_auc += auc
    return mean_auc / N
Developer: BigZihao, Project: amazon_employee_access_2013, Lines: 10, Source file: logistic_regression.py



Note: The sklearn.metrics.auc_score examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright remains with the original authors; please consult each project's license before redistributing or reusing the code. Do not repost without permission.

