
Python linear_model.RidgeCV Class Code Examples


This article collects typical usage examples of the Python class sklearn.linear_model.RidgeCV. If you are wondering how the RidgeCV class is used in practice, how to call it, or what real RidgeCV code looks like, the curated class examples below should help.



A total of 20 RidgeCV code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
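Before the collected examples, here is a minimal, self-contained sketch of typical RidgeCV usage; the synthetic data and the alpha grid are illustrative assumptions and are not taken from any of the projects referenced below.

import numpy as np
from sklearn.linear_model import RidgeCV

# Synthetic regression data: 100 samples, 5 features (illustrative only)
rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X @ rng.randn(5) + 0.1 * rng.randn(100)

# Candidate regularization strengths; RidgeCV selects one by cross validation
model = RidgeCV(alphas=np.logspace(-3, 3, 13))
model.fit(X, y)

print("selected alpha:", model.alpha_)   # regularization strength chosen by CV
print("coefficients:", model.coef_)
print("training R^2:", model.score(X, y))

The examples that follow show the same pattern in real projects: build a RidgeCV with a grid of alphas, call fit, then read alpha_, coef_, or predictions from the fitted model.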

Example 1: RR_cv_estimate_alpha

def RR_cv_estimate_alpha(sspacing, tspacing, alphas):
    """
    Estimate the optimal regularization parameter using grid search from a list
    and via k-fold cross validation

    Parameters
    ----------
    sspacing : 2D subsampling ratio in space (in one direction)

    tspacing : 1D subsampling ratio in time

    alphas : list of regularization parameters to do grid search
    
    """
    #Load all training data
    (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) =  data_preprocess(sspacing, tspacing)  
    
    # RidgeCV
    from sklearn.linear_model import RidgeCV    
    ridge = RidgeCV(alphas = alphas, cv = 10, fit_intercept=False, normalize=False)
    ridge.fit(Xl_tr, Xh_tr)
    
    RR_alpha_opt = ridge.alpha_
    
    print('\n Optimal lambda:', RR_alpha_opt)
    
    # save to .mat file
    import scipy.io as io
    filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_cv_alpha_sspacing',
                        str(sspacing),'_tspacing',str(tspacing),'.mat'])
    io.savemat(filename, dict(alphas=alphas, RR_alpha_opt=RR_alpha_opt))
    
    # return
    return RR_alpha_opt
Author: linhvannguyen, Project: PhDworks, Lines: 34, Source: regressionUtils.py


Example 2: ridge_predict

def ridge_predict(train_data, train_target, test_data):

	# Prep modeller
	alpha_ranges = [1e-3, 1e-2, 1e-1, 1, 1e2, 1e3,
					2e3, 2.5e3, 3e3, 3.5e3, 4e3, 
					5e3, 6e3, 6.1e3, 6.15e3, 6.25e3, 6.3e3, 6.4e3, 7e3, 
					7.75e3, 7.9e3, 8e3, 8.1e3, 8.2e3, 8.25e3, 8.3e3, 8.4e3, 8.5e3, 8.75e3, 9e3, 9.25e3, 9.4e3, 9.5e3, 9.6e3, 9.75e3,
					1e4, 1.25e4, 1.4e4, 1.5e4, 1.55e4, 1.58e4, 1.6e4, 1.625e4, 1.65e4, 1.7e4, 1.725e4, 1.74e4, 1.75e4, 1.76e4, 1.78e4, 1.85e4, 
					2e4, 2.25e4, 2.5e4, 3e4, 4e4,  
					0.5e5, 0.75e5, 1e5, 1.25e5, 1.5e5, 
					0.8e6, 0.9e6, 1e6, 1.1e6, 1.2e6, 1.25e6, 1.28e6, 1.3e6, 1.32e6, 1.33e6, 1.34e6, 1.4e6, 1.5e6, 2e6,
					1e7, 1e8, 1e9, 5e9, 1e10, 5e10, 1e11, 1e12, 1e13]
	clf = RidgeCV(alphas=alpha_ranges, 
              normalize=True, cv=None, fit_intercept=False, store_cv_values=True)

	# Fit
	clf.fit(train_data, train_target)
	# print("alpha range:", alpha_ranges)
	# print("CV per alpha:",np.mean(clf.cv_values_, axis=0))
	# print("alpha used:", clf.alpha_)
	# print("fit score:", clf.score(train_data, train_target))

	# Prediction
	predictions = clf.predict(test_data)

	return predictions
Author: LOBUTO, Project: CANCER.GENOMICS, Lines: 26, Source: ridge_tests.py


Example 3: fit

 def fit(self, X, y):
     """Fit the shape function of each features with the backfitting algorithm.
     Please note that the shape functions are centered (not reduced).
     
     Parameters
     ----------
     X : array-like, shape=(n_samples, n_features)
         The input samples. 
         
     Returns
     -------
     self : object
         The Generalized Additive Model with the fitted shape functions
     """
     
     n_samples, n_features = X.shape
     
     if not isinstance(self.smoothers, list):
         self.smoothers_ = [clone(self.smoothers) for i in range(n_features) ]
         self.ridge = RidgeCV(alphas = [self.ridge_alphas]*len(self.smoothers_), fit_intercept=False)
     else:
         self.smoothers_ = [clone(self.smoothers[j]) for j in range(n_features) ]
         self.ridge = RidgeCV(alphas = [self.ridge_alphas]*len(self.smoothers_), fit_intercept=False)
         
     self.y_mean_ = np.mean(y)
     self.rmse_ = [] # list to store the training error across iterations
     y -= y.mean()
     temp = np.zeros(shape=(n_samples, n_features)) # array to store the shape functions for re-use in the next iteration
     shape_functions = np.zeros(shape=(n_samples, n_features))
     for i in range(self.max_iter):
         for j in range(n_features):
             # select all the columns except the j-th one
             idx = list(set(np.arange(0, n_features, 1)) - set([j])) 
             
             # Compute the residuals from the previous iteration
             residuals = y.reshape((n_samples, 1)) - temp[:, idx].sum(axis=1, keepdims=True).reshape((n_samples, 1))
             residuals -= residuals.mean()
             #print(np.amin(residuals), np.amax(residuals), 'iteration number %s'%(i+1))
            
             self.smoothers_[j].fit(X[:, j:j+1], residuals.reshape((n_samples,))) # reshape to avoid a deprecation warning
             shape_functions[:, j]= self.smoothers_[j].predict(X[:, j:j+1])
             shape_functions[:, j] -= shape_functions[:, j].mean()
         
         # Ridge regression on top of the shape functions in order to rescale each one
         self.ridge.fit(shape_functions, y)
         coef = self.ridge.coef_
         shape_functions *= coef
         
         y_pred = shape_functions.sum(axis=1)
         y_pred -= y_pred.mean()
         self.rmse_.append(met.mean_squared_error(y_pred, y))
         
         temp=shape_functions.copy()
         #plt.scatter(1, np.abs(residuals.min()), c='g', label='iteration = %s'%i)
         #plt.scatter(2, np.abs(residuals.max()), c='r')
         #plt.legend()
         #plt.show()
     return self
Author: nicolasJouvin, Project: GAM, Lines: 59, Source: GeneralizedAdditivRegressor.py


Example 4: regularizedreg

def regularizedreg(Xtrain,Xtest,ytrain,ytest):
    Rclf = RidgeCV(alphas=[1,2,20,40,50]) # RidgeCV(alphas=[0.1, 1.0, 2.0, 4.0, 20.0], cv=None, fit_intercept=True, scoring=None, normalize=False)
    Rclf.fit(Xtrain, ytrain)
    print("Residual sum of squares: %.2f"
         % np.mean((Rclf.predict(Xtest) - ytest) ** 2))
    print('Regularization chosen, alpha = %.2f' % Rclf.alpha_)
    print(' Coef values = ', Rclf.coef_)
    print('Variance score: %.2f' % Rclf.score(Xtest, ytest))
Author: chezhia, Project: PySci, Lines: 8, Source: selectfeatures.py


Example 5: ridgeCV

def ridgeCV(data, targets):
    """
    Returns a RidgeCV linear model for predictions with alphas [1, 10, 50, 100, 1000]
    Takes the data and the associated targets as arguments.
    """
    model = RidgeCV(alphas=[1, 10, 50, 100, 1000])
    model.fit(data, targets)
    return model
Author: sapresearch, Project: fuzzy_adventure, Lines: 8, Source: transaction_duration_prediction.py


Example 6: fit_Ridge

def fit_Ridge(features_train, labels_train, features_pred, alphas=(0.1, 1.0, 10.0)):
	model = RidgeCV(normalize=True, store_cv_values=True, alphas=alphas)
	model.fit(features_train, labels_train)
	cv_errors = np.mean(model.cv_values_, axis=0)
	print "RIDGE - CV error min: ", np.min(cv_errors)	
	# Test the model
	labels_pred = model.predict(features_pred)
	return labels_pred
Author: SU-AstroML, Project: AstroML-course, Lines: 8, Source: fit_method.py


Example 7: orth_signal

def orth_signal(x, atol=1e-13, rtol=0):
    """
    Returns signal orthogonal to input ensemble.
    x -> input signal [n_samples, n_neurons]
    """
    t = np.linspace(0, 1, x.shape[0])[:, None]
    f = np.arange(x.shape[1]) / x.shape[1]
    xt = np.sum(np.sin(2 * np.pi * f * 3 * t) / (f + 1), axis=1)
    w = RidgeCV(np.logspace(-6, 3, 50))
    w.fit(x, xt)
    xt = xt - w.predict(x)
    # pdb.set_trace()
    return xt
Author: kakila, Project: NCReservoir, Lines: 13, Source: reservoir.py


Example 8: RidgeCVLinear

def RidgeCVLinear(train,test):
  print('starting RidgeCVLinear ...')
  ridge=RidgeCV(normalize=True,cv=5)
  train = train.reindex(np.random.permutation(train.index))  # keep the shuffled rows before the split
  tr_X=train.drop('LogSales',axis=1)
  tr_Y=train['LogSales']
  cutoff=math.floor(0.7*tr_Y.size)
  ridge.fit(tr_X[:cutoff],tr_Y[:cutoff])
  predY=ridge.predict(tr_X[cutoff:])
  mspe=rmspe(predY,tr_Y[cutoff:])
  print('rmspe is %9f'% mspe)
  print(train.columns)
  print(ridge.coef_)
  print('starting RidgeCVLinear ... completed')
  return ridge
Author: sathya288, Project: kaggle, Lines: 15, Source: newross.py


Example 9: __init__

    def __init__(self, num_dists=2, sigma=0.1, base_learner=None, **kwargs):
        self.num_dists = num_dists
        self.sigma = sigma
        
        if base_learner is None:
            base_learner = RidgeCV(fit_intercept=False, \
                    alphas=[0.001, 0.01, 0.1, 100, 1000], cv=None,
                    store_cv_values=True)
        
        if 'fit_intercept' not in kwargs:
            kwargs['fit_intercept'] = False

        self.base_learner = base_learner.set_params(**kwargs)
        self.R = None
        self.model = None
Author: flaviovdf, Project: ecmlpkdd-analytics-challenge-2014, Lines: 15, Source: rbf.py


Example 10: stacking

def stacking(estimators):
    # training
    predictions = []
    for estim in estimators:
        estim.fit(X, y)
        predictions.append(estim.predict(X))

    agg = RidgeCV(alphas=alphas, cv=5, normalize=True, fit_intercept=True)         # aggregator
    agg.fit(np.array(predictions).T, y)

    # test
    predictions = []
    for estim in estimators:
        predictions.append(estim.predict(test_data))

    predictions = agg.predict(np.array(predictions).T)
    write_results(predictions)
Author: renhzhang2, Project: Kaggle-challenge, Lines: 17, Source: main.py


Example 11: validate

def validate(nPrev, nAfter, aux_temp, aux_sun, aux_prec, get_model=False):
    X_Final = getFeature(nPrev, nAfter, aux_temp, aux_sun, aux_prec, TrainFiles)
    data_train_target = pd.read_csv(TrainTarget, sep='\t', header=None)
    y = data_train_target.loc[:,0].values

    TEST_SIZE = 0.2
    RANDOM_STATE = 0
    X_train, X_val, y_train, y_val = train_test_split(X_Final, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)

    imp.fit(X_train)
    X_train = imp.transform(X_train)
    imp.fit(X_val)
    X_val = imp.transform(X_val)

    reg = RidgeCV()
    reg.fit(X_train, y_train)
    y_val_pred = reg.predict(X_val)
    print(mean_squared_error(y_val, y_val_pred))
    
    if get_model:
        imp.fit(X_Final)
        X_Final = imp.transform(X_Final)
        reg_submit = RidgeCV()
        reg_submit.fit(X_Final, y)
        return reg_submit
    return mean_squared_error(y_val, y_val_pred)
Author: leslie071564, Project: future-temparture-prediction, Lines: 26, Source: predict_ngram.py


Example 12: build

def build(path):
    """
    Computes a linear regression using Ridge regularization.
    """
    print "Building the linear model using Ridge regression"
    start = time.time()

    # Load the data, the target is the last column.
    data  = np.loadtxt(path, delimiter=',')
    y = data[:,-1]
    X = data[:,0:-1]

    # Instantiate and fit the model.
    model = RidgeCV()
    model.fit(X, y)

    print "Finished training the linear model in {:0.3f} seconds".format(time.time() - start)
    return model
Author: Puertarra, Project: hadoop-fundamentals, Lines: 18, Source: spark-sklearn-app.py


Example 13: ridgeRegression

def ridgeRegression(X,Y):
    """
    :param X: data consisting of features (excluding class variable)
    :param Y: column vector consisting of class variable
    :return: report best RMSE value for tuned alpha in ridge regression
    """
    tuningAlpha = [0.1,0.01,0.001]

   # can change to model on the entire dataset but by convention splitting the dataset is a better option
   # X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size = 0.10, random_state = 5)

    ridge = RidgeCV(normalize=True,scoring='mean_squared_error', alphas=tuningAlpha, cv=10)
    ridge.fit(X, Y)
    prediction = ridge.predict(X)

    print "RIDGE REGRESSION"
    print "Best Alpha value for Ridge Regression : " + str(ridge.alpha_)
    print 'Best RMSE for corresponding Alpha =', np.sqrt(mean_squared_error(Y, prediction))
Author: RonakSumbaly, Project: EE239AS-Signal-and-Systems, Lines: 18, Source: utility.py


Example 14: fit_mapping

 def fit_mapping(self):
     """
     Fits the mappings from one distributions to the other
     """
     X1 = self.X1
     n1, p1 = X1.shape
     X2 = self.X2
     n2, p2 = X2.shape
     P = self.P
     c = self.c
     r = self.r
     reg_mapping = self.reg_mapping
     # mapping from X1 to X2
     self.model1to2 = RidgeCV(alphas=np.logspace(-3, 3, 7))
     self.model1to2.fit(X1, (P * c.reshape((-1, 1))) @ X2)
     # mapping from X2 to X1
     self.model2to1 = RidgeCV(alphas=np.logspace(-3, 3, 7))
     self.model2to1.fit(X2, (P.T * r.reshape((-1, 1))) @ X1)  # barycentric targets in X1, matching the comment above
Author: MichielStock, Project: Teaching, Lines: 18, Source: optimal_transport.py


Example 15: map_vector_spaces

    def map_vector_spaces(self):
        """
        Perform linear regression upon the semantic embeddings.

        - Semantic embeddings obtained from vector space of corresponding
            bilingual words of the same language.
        """
        self.logger.info('Learning transformation between Vector Spaces.')
        self.lt = RidgeCV()
        self.lt.fit(self.vector_1_list, self.vector_2_list)
Author: KshitijKarthick, Project: tvecs, Lines: 10, Source: vector_space_mapper.py


Example 16: regression

def regression(x, y):
  #enet = MultiTaskElasticNetCV(l1_ratio=0.2)
  enet = RidgeCV()
  y_pred_enet = enet.fit(x, y)

  word_vals = pd.DataFrame(columns = ['coeff'])
  counter = 0
  for i in y_pred_enet.coef_[0]:
    word_vals.loc[x.columns.values[counter]] = i
    counter += 1

  predicted_vals = y_pred_enet.predict(x)
  predicted_df = pd.DataFrame(columns = ['comment','predicted'])
  predicted_df.set_index(['comment'], inplace = True)
  counter = 0
  for i in y.index.values:
    predicted_df.loc[i, 'predicted'] = predicted_vals[counter][0]
    counter += 1

  return word_vals, predicted_df
Author: jacksonchen, Project: app_analysis, Lines: 20, Source: regression.py


Example 17: fitFlowRates

    def fitFlowRates( self, rainData, flowData, **kwargs ):
        # model stream flows from rainfall rates

        xTrain = self.setDelay( rainData, kwargs[ 'nDays' ] )
        yTrain = flowData

        # perform feature scaling
        weatherScaler = preprocessing.StandardScaler().fit( xTrain )
        xTrain = weatherScaler.transform( xTrain )
        self.weatherScaler = weatherScaler

        if kwargs[ 'simpleModel' ]:
            model = RidgeCV( alphas = np.logspace( -2., 2. ) )
        else:
            model = ExtraTreesRegressor( n_estimators = 50, n_jobs = 4,
                                         random_state = 42 )
            
        model.fit( xTrain, yTrain )

        self.flowModel = model
Author: jjardel, Project: bd-bq, Lines: 20, Source: riverModel2.py


Example 18: transform

    def transform(self, X):

        if len(X.shape) == 1:
            X = np.atleast_2d(X).T

        H = self.H[self.n_washout:,:]
        yy = self.X[self.n_washout:,:]

        ## if regularization parameter is None, then determine by cross validation
        if self.lamb is None:
            ## proposals for regularization parameters
            lamb_all = [0.1, 1., 10.]
            ## initialize Ridge Regression classifier
            rr_clf = RidgeCV(alphas=lamb_all)
            ## fit the data with the linear model
            rr_clf.fit(H, yy)
            ## regularization parameter determined by cross validation
            self.lamb = rr_clf.alpha_

        else:
            rr_clf = Ridge(alpha=self.lamb)
            rr_clf.fit(H,yy)

        ## best-fit output weights
        self.ww = rr_clf.coef_

        ## store activations for future use

        return self.ww
Author: dhuppenkothen, Project: LightEcho, Lines: 29, Source: echostate.py


Example 19: fitLakeLevels

    def fitLakeLevels( self, flowData, lakeData, **kwargs ):
        # model lake levels from stream flows
        
        xTrain = self.setDelay( flowData, kwargs[ 'nDays' ] )

        flowScaler = preprocessing.StandardScaler().fit( xTrain )
        xTrain = flowScaler.transform( xTrain )
        self.flowScaler = flowScaler

        # fit to daily changes in elevation
        yTrain = lakeData - np.roll( lakeData, 1 )
        yTrain[ 0 ] = 0.


        if kwargs[ 'simpleModel' ]:
            model = RidgeCV( alphas = np.logspace( -2., 2. ) )
        else:
            model = ExtraTreesRegressor( n_estimators = 50, n_jobs = 4,
                                         random_state = 42 )
        

        model.fit( xTrain, yTrain )

        self.lakeModel = model

        ypreds = model.predict( xTrain )
        lakePreds = lakeData[ 0 ] + np.cumsum( ypreds )

        plt.clf()
        plt.plot( self.dates, yTrain + lakeData, label = 'Actual' )
        plt.plot( self.dates, lakePreds, label = 'Predicted' )

        plt.xlabel( 'Date' )
        plt.ylabel( 'Lake Travis Elevation (ft)' )
        plt.legend()
        plt.savefig( 'lakelevels.png' )
Author: jjardel, Project: bd-bq, Lines: 36, Source: riverModel2.py


Example 20: transform

    def transform(self, X):

        ## make sure data is in correct form (N_samples, N_dimensions)
        if len(X.shape) == 1:
            X = np.atleast_2d(X).T

        ## store data in attribute
        self.X = X

        ## number of data points
        self.K = int(self.X.shape[0])

        ## number of dimensions
        self.D = int(self.X.shape[1])


        ## filter windows
        H = np.zeros((self.K-self.k, self.k))

        for i in range(self.k, self.K - 1, 1):
            H[i-self.k,:] = X[i-self.k:i,0]


        self.H = H

        #print(self.k)
        if len(X.shape) == 1:
            X = np.atleast_2d(X).T

        H = self.H
        yy = X[self.k:]


        if self.lamb is None:
            ## proposals for regularization parameters
            lamb_all = [0.1, 1., 10.]
            ## initialize Ridge Regression classifier
            rr_clf = RidgeCV(alphas=lamb_all)
            ## fit the data with the linear model
            #print(H.shape)
            #print(yy.shape)
            rr_clf.fit(H, yy)
            ## regularization parameter determined by cross validation
            self.lamb = rr_clf.alpha_

        else:
            rr_clf = Ridge(alpha=self.lamb)
            rr_clf.fit(H,yy)

        ## best-fit output weights
        self.ww = rr_clf.coef_

        ## store activations for future use

        return self.ww
Author: dhuppenkothen, Project: LightEcho, Lines: 55, Source: linearfilter.py



Note: The sklearn.linear_model.RidgeCV class examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.



