This article collects typical usage examples of the Python sklearn.svm.SVR class: what the class is for, how it is called, and what real-world usage looks like. The curated examples below should help you get started.
The following presents 20 SVR code examples drawn from open-source projects, ordered roughly by popularity.
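Before diving into the collected snippets, here is a minimal, self-contained sketch of typical SVR usage; the synthetic data and the C/gamma/epsilon values are illustrative assumptions, not taken from any of the projects below.

import numpy as np
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Synthetic 1-D regression problem, purely for illustration.
rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(200, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(200)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# RBF-kernel SVR; these hyperparameters are common starting points, not tuned values.
model = SVR(kernel='rbf', C=100, gamma=0.1, epsilon=0.1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("test MSE: %.4f" % mean_squared_error(y_test, y_pred))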
Example 1: svr_main

def svr_main(X, Y):
    X_train = X[:TRAIN_SIZE]
    Y_train = Y[:TRAIN_SIZE]
    X_test = X[TRAIN_SIZE:]
    Y_test = Y[TRAIN_SIZE:]
    clf = SVR(kernel='rbf', C=1e3, gamma=0.00001)
    #clf.fit(X_train,Y_train)
    #y_pred = clf.predict(X_test)
    #plt.plot(X_test, y_pred, linestyle='-', color='red')
    #clf = GradientBoostingRegressor(n_estimators=100,max_depth=1)
    #clf = DecisionTreeRegressor(max_depth=25)
    #clf = ExtraTreesRegressor(n_estimators=2000,max_depth=14)
    #clf = xgb.XGBRegressor(n_estimators=2000,max_depth=25)
    #clf = RandomForestRegressor(n_estimators=1000,max_depth=26,n_jobs=7)
    predict_list = []
    for i in xrange(TEST_SIZE):
        X = [[x] for x in xrange(i, TRAIN_SIZE + i)]
        clf.fit(X, Y[i:TRAIN_SIZE + i])
        y_pred = clf.predict([TRAIN_SIZE + 1 + i])
        predict_list.append(y_pred)
    print "mean_squared_error:%s" % mean_squared_error(Y_test, predict_list)
    print "sqrt of mean_squared_error:%s" % np.sqrt(mean_squared_error(Y_test, predict_list))
    origin_data = Y_test
    print "origin data:%s" % origin_data
    plt.plot([x for x in xrange(TRAIN_SIZE + 1, TRAIN_SIZE + TEST_SIZE + 1)], predict_list, linestyle='-', color='red', label='prediction model')
    plt.plot(X_test, Y_test, linestyle='-', color='blue', label='actual model')
    plt.legend(loc=1, prop={'size': 12})
    plt.show()

Author: zhengze | Project: svm-prediction | Lines: 31 | Source: svm-prediction.py
Example 2: analyze

def analyze(data, label, num_folds):
    # Partition data into folds
    n = len(data) // num_folds
    data_folds = [data[i:i+n] for i in range(0, len(data), n)]
    label_folds = [label[i:i+n] for i in range(0, len(label), n)]
    lin_reg_error = 0
    cs = [4**c for c in range(-10, 0, 1)]
    svm_error = [0] * len(cs)
    svm_std = [0] * len(cs)
    # for i in range(0, num_folds):
    #     test_data = data_folds[i]
    #     test_label = label_folds[i]
    #     train_data = []
    #     train_label = []
    #     for j in range(num_folds):
    #         if i != j:
    #             train_data += data_folds[j]
    #             train_label += label_folds[j]
    #     model = linear_model.LinearRegression()
    #     model.fit(data, label)
    #     return model
    #     lin_reg_error += np.mean(abs(model.predict(test_data) - test_label))
    #
    #     for i2 in range(len(cs)):
    #         svm_classifier = SVR(gamma=cs[i2])
    #         svm_classifier.fit(train_data, train_label)
    #         svm_error[i2] += np.mean(abs(svm_classifier.predict(test_data) - test_label))
    #         svm_std[i2] += np.std(abs(svm_classifier.predict(test_data) - test_label))
    svm_c = SVR(gamma=4**-7)
    svm_c.fit(data, label)
    return svm_c

Author: awood314 | Project: fantasy-football-ml | Lines: 35 | Source: analyze.py
Example 3: svm

def svm(self):
    """
    C_range = np.logspace(-2, 10, 2)
    print C_range
    gamma_range = np.logspace(-9, 3, 2)
    print gamma_range
    param_grid = dict(gamma=gamma_range, C=C_range)
    cv = ShuffleSplit(len(self.search_inputs.y_train), n_iter=5, test_size=0.2, random_state=42)
    grid = GridSearchCV(SVR(verbose=True), param_grid=param_grid, cv=cv)
    #grid = GridSearchCV(svm.SVR(kernel='rbf', verbose=True), param_grid=param_grid, cv=cv)
    grid.fit(self.search_inputs.X_train, self.search_inputs.y_train)
    print("The best parameters are %s with a score of %0.2f"
          % (grid.best_params_, grid.best_score_))
    self.svm_preds = grid.predict(self.search_inputs.X_test)
    """
    regression = SVR(kernel='rbf', C=1e3, gamma=0.1, verbose=True)
    regress_fit = regression.fit(self.search_inputs.X_train, self.search_inputs.y_train)
    self.svm_preds = regress_fit.predict(self.search_inputs.X_test)
    for i in range(0, len(self.svm_preds) - 1):
        if self.svm_preds[i] < 1:
            self.svm_preds[i] = 1.00
        elif self.svm_preds[i] > 3:
            self.svm_preds[i] = 3.00
    self.search_inputs.fin_df['relevance'] = np.array(self.svm_preds)  # easy swap in / out
    final_file_svm = self.search_inputs.fin_df.to_csv(self.fin_file_name + '_svm.csv', float_format='%.5f', index=False)

Author: jms-dipadua | Project: machine-learn-py | Lines: 29 | Source: learn_predict.py
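The grid search that Example 3 leaves commented out in its docstring uses the old ShuffleSplit(n, n_iter=...) signature. A rough equivalent against the current scikit-learn API might look like the sketch below; the random arrays are stand-ins for self.search_inputs.X_train and y_train, and the parameter ranges simply mirror the docstring.

import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV, ShuffleSplit

# Stand-in data; in Example 3 this would be self.search_inputs.X_train / y_train.
rng = np.random.RandomState(42)
X_train = rng.rand(200, 4)
y_train = rng.rand(200)

param_grid = dict(C=np.logspace(-2, 10, 2), gamma=np.logspace(-9, 3, 2))
# ShuffleSplit now takes n_splits instead of the removed n_iter argument.
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv)
grid.fit(X_train, y_train)
print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))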
Example 4: fit

def fit(self, start_date, end_date):
    for ticker in self.tickers:
        self.stocks[ticker] = Stock(ticker)
    params_svr = [{
        'kernel': ['rbf', 'sigmoid', 'linear'],
        'C': [0.01, 0.1, 1, 10, 100],
        'epsilon': [0.0000001, 0.000001, 0.00001]
    }]
    params = ParameterGrid(params_svr)
    # Find the split for training and CV
    mid_date = train_test_split(start_date, end_date)
    for ticker, stock in self.stocks.items():
        X_train, y_train = stock.get_data(start_date, mid_date, fit=True)
        # X_train = self.pca.fit_transform(X_train.values)
        X_train = X_train.values
        # pdb.set_trace()
        X_cv, y_cv = stock.get_data(mid_date, end_date)
        # X_cv = self.pca.transform(X_cv.values)
        X_cv = X_cv.values
        lowest_mse = np.inf
        for i, param in enumerate(params):
            svr = SVR(**param)
            # ada = AdaBoostRegressor(svr)
            svr.fit(X_train, y_train.values)
            mse = mean_squared_error(
                y_cv, svr.predict(X_cv))
            if mse <= lowest_mse:
                self.models[ticker] = svr
    return self

Author: atremblay | Project: MLND | Lines: 35 | Source: predictor.py
Example 5: __init__

class HotTweets:
    ''' Train and get tweet hotness '''

    def __init__(self, kernel='rbf', C=1e3, gamma=0.1, epsilon=0.1, n_comp=100):
        ''' Prepare support vector regression '''
        self.svr = SVR(kernel=kernel, C=C, gamma=gamma, epsilon=epsilon, verbose=True)
        #self.svr = LogisticRegression(random_state=42, verbose=0)
        self.n_comp = n_comp

    def fit_scaler(self, dev, i_dev):
        ''' Train normalizers for features and importances '''
        # importance scaler
        self.std_scaler_i = sklearn.preprocessing.StandardScaler()
        self.std_scaler_i.fit(i_dev)
        self.norm = sklearn.preprocessing.StandardScaler()
        self.norm.fit(dev[:, 0:self.n_comp])
        self.n_comp = self.n_comp

    def train(self, features, importances):
        ''' Train regression '''
        importances = self.std_scaler_i.transform(importances)
        features = self.norm.transform(features[:, 0:self.n_comp])
        self.svr.fit(features, importances)

    def predict(self, features):
        ''' Predict importances '''
        features = self.norm.transform(features[:, 0:self.n_comp])
        results = self.svr.predict(features)
        #print results[0:100:5]
        results = self.std_scaler_i.inverse_transform(results)
        #print results[0:100:5]
        return results

Author: makseq | Project: 360 | Lines: 33 | Source: hotTweets.py
Example 6: main

def main(args):
    (training_file, label_file, test_file, test_label, c, e) = args
    svr = SVR(C=float(c), epsilon=float(e), kernel='rbf')
    X = load_feat(training_file)
    y = [float(line.strip()) for line in open(label_file)]
    X = np.asarray(X)
    y = np.asarray(y)
    test_X = load_feat(test_file)
    test_X = np.asarray(test_X)
    test_X[np.isnan(test_X)] = 0
    svr.fit(X, y)
    pred = svr.predict(test_X)
    if test_label != 'none':
        test_y = [float(line.strip()) for line in open(test_label)]
        test_y = np.asarray(test_y)
        print 'MAE: ', mean_absolute_error(test_y, pred)
        print 'RMSE: ', sqrt(mean_squared_error(test_y, pred))
        print 'corrpearson: ', sp.stats.pearsonr(test_y, pred)
        print 'r-sqr: ', sp.stats.linregress(test_y, pred)[2] ** 2
        print mquantiles(test_y, prob=[0.10, 0.90])
        print mquantiles(pred, prob=[0.10, 0.90])
    with open(test_file + '.svr.pred', 'w') as output:
        for p in pred:
            print >>output, p
    return

Author: mriosb08 | Project: palodiem-QE | Lines: 30 | Source: SVR.py
Example 7: train

def train(self, x, y, param_names, random_search=100,
          kernel_cache_size=2000, **kwargs):
    if self._debug:
        print "First training sample\n", x[0]
    start = time.time()
    scaled_x = self._set_and_preprocess(x=x, param_names=param_names)
    # Check that each input is between 0 and 1
    self._check_scaling(scaled_x=scaled_x)
    if self._debug:
        print "Shape of training data: ", scaled_x.shape
        print "Param names: ", self._used_param_names
        print "First training sample\n", scaled_x[0]
        print "Encode: ", self._encode
    # Do a random search
    c, gamma = self._random_search(random_iter=random_search, x=scaled_x,
                                   y=y, kernel_cache_size=kernel_cache_size)
    # Now train model
    try:
        svr = SVR(gamma=gamma, C=c, random_state=self._rng,
                  cache_size=kernel_cache_size)
        svr.fit(scaled_x, y)
        self._model = svr
    except Exception, e:
        print "Training failed", e.message
        svr = None

Author: KEggensperger | Project: SurrogateBenchmarks | Lines: 29 | Source: SupportVectorRegression.py
Example 8: RunSVRScikit

def RunSVRScikit(q):
    totalTimer = Timer()

    # Load input dataset.
    Log.Info("Loading dataset", self.verbose)

    # Use the last row of the training set as the responses.
    X, y = SplitTrainData(self.dataset)

    # Get all the parameters.
    c = re.search("-c (\d+\.\d+)", options)
    e = re.search("-e (\d+\.\d+)", options)
    g = re.search("-g (\d+\.\d+)", options)

    C = 1.0 if not c else float(c.group(1))
    epsilon = 1.0 if not e else float(e.group(1))
    gamma = 0.1 if not g else float(g.group(1))

    try:
        with totalTimer:
            # Perform SVR.
            model = SSVR(kernel='rbf', C=C, epsilon=epsilon, gamma=gamma)
            model.fit(X, y)
    except Exception as e:
        q.put(-1)
        return -1

    time = totalTimer.ElapsedTime()
    q.put(time)
    return time

Author: MarcosPividori | Project: benchmarks | Lines: 29 | Source: svr.py
Example 9: test_regression

def test_regression():
    X, y = make_regression(n_samples=1000,
                           n_features=5,
                           n_informative=2,
                           n_targets=1,
                           random_state=123,
                           shuffle=False)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=123)
    svm = SVR(kernel='rbf')
    svm.fit(X_train, y_train)
    imp_vals, imp_all = feature_importance_permutation(
        predict_method=svm.predict,
        X=X_test,
        y=y_test,
        metric='r2',
        num_rounds=1,
        seed=123)
    assert imp_vals.shape == (X_train.shape[1], )
    assert imp_all.shape == (X_train.shape[1], 1)
    assert imp_vals[0] > 0.2
    assert imp_vals[1] > 0.2
    assert sum(imp_vals[3:]) <= 0.01

Author: JJLWHarrison | Project: mlxtend | Lines: 28 | Source: test_feature_importance.py
Example 10: train_model

def train_model(train, test, labels):
    clf = SVR(C=1.0, epsilon=0.2)
    clf.fit(train, labels)
    #clf = GaussianNB()
    #clf.fit(train, labels)
    print "Good!"
    predictions = clf.predict(test)
    print predictions.shape
    predictions = pd.DataFrame(predictions, columns=['relevance'])
    print "Good again!"
    print "Predictions head -------"
    print predictions.head()
    print predictions.shape
    print "TEST head -------"
    print test.head()
    print test.shape
    test['id'].to_csv("TEST_TEST.csv", index=False)
    predictions.to_csv("PREDICTIONS.csv", index=False)
    #test = test.reset_index()
    #predictions = predictions.reset_index()
    #test = test.groupby(level=0).first()
    #predictions = predictions.groupby(level=0).first()
    predictions = pd.concat([test['id'], predictions], axis=1, verify_integrity=False)
    print predictions
    return predictions

Author: ap-mishra | Project: KTHDRelevance | Lines: 25 | Source: chunk_SVR.py
Example 11: learn

def learn(X, y):
    # do pca
    pca = PCA(n_components=6)
    pca_6 = pca.fit(X)
    print('variance ratio')
    print(pca_6.explained_variance_ratio_)
    X = pca.fit_transform(X)
    # X = np.concatenate((X_pca[:, 0].reshape(X.shape[0], 1), X_pca[:, 5].reshape(X.shape[0], 1)), axis=1)

    # do svr
    svr_rbf = SVR(kernel='rbf', C=1)
    svr_rbf.fit(X, y)
    # print(model_rbf)
    y_rbf = svr_rbf.predict(X)
    print(y_rbf)
    print(y)

    # see difference
    y_rbf = np.transpose(y_rbf)
    deviation(y, y_rbf)

    # pickle model
    with open('rbfmodel.pkl', 'wb') as f:
        pickle.dump(svr_rbf, f)
    with open('pcamodel.pkl', 'wb') as f:
        pickle.dump(pca_6, f)

Author: inciboduroglu | Project: gradr | Lines: 29 | Source: learn.py
Example 12: train_SVM

def train_SVM(X, Y, kernel='rbf', shrinking=True, tol=0.001, cache_size=1500, verbose=True, max_iter=-1):
    """Assumes all irrelevant features have been removed from X and Y"""
    """Learns several hundred SVMs"""
    clf = SVR(kernel=kernel, tol=tol, cache_size=cache_size, verbose=verbose, max_iter=max_iter)
    pipeline = Pipeline(zip(["imputate", "vart", "scale", "svm"], [Imputer(), VarianceThreshold(), StandardScaler(), clf]))
    param_grid = dict(svm__C=[0.1, 1, 10, 100, 1000],
                      svm__gamma=[0.001, 0.01, 1, 10])
    grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=3)
    results = []
    for i in range(Y[0].shape[1]):
        Y_new = np.fromiter((x[:, i][0, 0] for x in Y), np.double)
        X_new = np.array([np.matrix(x.data).flatten().tolist() for x in X], np.double)
        #X_new = np.fromiter((np.matrix(x.data) for x in X), np.double)
        X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X_new, Y_new, test_size=0.2)
        X_train = flatten(X_train)
        X_test = flatten(X_test)
        grid_search.fit(X_train, Y_train)
        results.append((grid_search.best_estimator_, clf.score(X_test, Y_test)))
        print("Best estimators (C): {0}, Score: {1}".format(grid_search.best_estimator_, clf.score(X_test, Y_test)))
    return results

Author: orichardson | Project: mcm2016 | Lines: 28 | Source: svm.py
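Example 12 (like Examples 18 and 20 further down) relies on APIs that have since been removed from scikit-learn: the sklearn.cross_validation module, the Imputer preprocessing class, and the "mean_squared_error" scorer name. A hedged sketch of the same pipeline against the current API, using random stand-in data in place of the original feature matrices, could look like this:

import numpy as np
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer  # replaces sklearn.preprocessing.Imputer
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, train_test_split  # replaces sklearn.cross_validation

# Stand-in data; the original snippet flattens matrices of features into X and Y.
rng = np.random.RandomState(0)
X = rng.rand(150, 8)
y = rng.rand(150)

pipeline = Pipeline([
    ("imputate", SimpleImputer()),
    ("vart", VarianceThreshold()),
    ("scale", StandardScaler()),
    ("svm", SVR()),
])
param_grid = {"svm__C": [0.1, 1, 10, 100, 1000],
              "svm__gamma": [0.001, 0.01, 1, 10]}
# "neg_mean_squared_error" replaces the removed "mean_squared_error" scorer name.
grid_search = GridSearchCV(pipeline, param_grid=param_grid,
                           scoring="neg_mean_squared_error")

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
grid_search.fit(X_train, y_train)
print("Best estimator:", grid_search.best_estimator_,
      "held-out score:", grid_search.score(X_test, y_test))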
Example 13: compute_mse

def compute_mse(regressor, horizon):
    # get wind park and corresponding target.
    windpark = NREL().get_windpark(NREL.park_id['tehachapi'], 3, 2004, 2005)
    target = windpark.get_target()

    # use power mapping for pattern-label mapping.
    feature_window = 3
    mapping = PowerMapping()
    X = mapping.get_features_park(windpark, feature_window, horizon)
    y = mapping.get_labels_turbine(target, feature_window, horizon)

    # train roughly for the year 2004, test for 2005.
    train_to = int(math.floor(len(X) * 0.5))
    test_to = len(X)
    train_step, test_step = 25, 25
    X_train = X[:train_to:train_step]
    y_train = y[:train_to:train_step]
    X_test = X[train_to:test_to:test_step]
    y_test = y[train_to:test_to:test_step]

    if(regressor == 'svr'):
        reg = SVR(kernel='rbf', epsilon=0.1, C=100.0,
                  gamma=0.0001).fit(X_train, y_train)
        mse = mean_squared_error(reg.predict(X_test), y_test)
    elif(regressor == 'knn'):
        reg = KNeighborsRegressor(10, 'uniform').fit(X_train, y_train)
        mse = mean_squared_error(reg.predict(X_test), y_test)
    return mse

Author: DeeplearningMachineLearning | Project: windml | Lines: 28 | Source: forecast_horizon.py
Example 14: __init__

class SVRegression:
    def __init__(self, kernel_value, c_value, iter_value):
        self.kernel = kernel_value
        self.c = c_value
        self.iter = iter_value
        self.svr_lin = None

    def fit_predict(self, x_train, y_train, x_test):
        self.svr_lin = SVR(kernel=self.kernel, C=self.c, max_iter=self.iter)
        y_lin = self.svr_lin.fit(x_train, y_train).predict(x_test)
        return y_lin

    def computeC(self, x_train):
        print "ARRAY ", type(x_train)
        print x_train
        array = x_train.todense()
        print "ARRAY ", type(array)
        print array
        result = array.sum(axis=1, dtype='float')
        result = pow(result, 2)
        total = result.sum(axis=0, dtype='float')
        rows, columns = x_train.shape
        total = float(total) / float(rows)
        total = pow(total, -1)
        print "C", total
        self.c = total

    def computeAccuracy(self, x, y):
        return self.svr_lin.score(x, y)

Author: CU-Boulder-Course | Project: ml-final-project | Lines: 29 | Source: regression.py
Example 15: getCharacteristicSignal

def getCharacteristicSignal(normedDays, phase, period, plotAxis=False):
    series = pandas.Series()
    for day in normedDays:
        series = series.append(day)

    '''Shift the times to give relative time of day'''
    t0 = array(series.index, dtype=float)
    t0 = (t0 - phase) % period
    t0 = array([t0]).T

    '''Shift the array to fit the edges'''
    tExt = array([array([t0 - period, t0, t0 + period]).flatten()]).T
    seriesExt = numpy.array([array(series), array(series),
                             array(series)]).flatten()

    '''Fit the model'''
    svr_rbf = SVR(kernel='rbf', C=1e4, gamma=.03, epsilon=.01)
    y_rbf = svr_rbf.fit(tExt, seriesExt)

    '''Predict a new characteristic signal'''
    t1 = array([arange(0, period, period / 100.)]).T
    signal = y_rbf.predict(t1)

    if plotAxis:
        plotAxis.plot(t1, signal)
        colors = ['b', 'g', 'r', 'c']
        for i, day in enumerate(normedDays):
            timesAdjusted = array(normedDays[i].index, dtype=float)
            timesAdjusted = (timesAdjusted - phase) % period
            plotAxis.plot(timesAdjusted, day, 'o', label=str(i),
                          color=colors[i])
        plotAxis.set_title('Characteristic Signal')
        plotAxis.legend(loc='best')
        plotAxis.set_xbound(0, period)
        plotAxis.set_ybound(-1.1, 1.1)
    return signal

Author: theandygross | Project: Luc | Lines: 35 | Source: LuciferasePlots.py
Example 16: train_svm

def train_svm(train_file, avg={}):
    test_X, test_Y, weight = load_data(train_file, avg)
    svr = SVR(kernel='rbf', C=100, gamma=1, verbose=True, cache_size=1024)
    print("start train")
    svr.fit(test_X, test_Y)
    print("train finish")
    return svr

Author: modkzs | Project: regression-predict | Lines: 7 | Source: model_time_series.py
Example 17: svr

def svr(self, X, y):
    """ Train support vector regression model

    Parameters
    ----------
    X : numpy ndarray with numeric values
        Array containing input parameters
        for the model. Model will try to
        learn the output y[i] in terms of
        inputs X[i]

    y : columnar numpy array with numeric values
        Array containing single column of
        output values. Entry at y[i] corresponds
        to value of the underlying experiment
        for input parameters X[i]

    Returns
    -------
    result : model
        Model learnt from incoming input
        inputs and outputs
    """
    clf = SVR(C=1.0, epsilon=0.2)
    clf.fit(X, y)
    return clf

Author: JuergenNeubauer | Project: pygotham | Lines: 27 | Source: ml.py
Example 18: Sand_SVR

def Sand_SVR(X_train, Y_train, X_test, Y_test, cv_iterator):
    #===========================================================================
    # param_grid = {'C':[100,500,1000, 5000, 10000, 100000],
    #               'epsilon':[0.075,0.1, 0.125]
    #               }
    #
    # svr = SVR(cache_size = 1000, random_state=42)
    # search = GridSearchCV(svr, param_grid, scoring="mean_squared_error", cv=cv_iterator)
    #===========================================================================

    #search.fit(X_train, Y_train["Sand"])
    #search.grid_scores_
    #svr = search.best_estimator_
    #svr.fit(X_train, Y_train["SAND"])
    #test = cross_val_score(svr, X_train.astype('float64'), Y_train["Ca"].astype('float64'), scoring="mean_squared_error", cv=cv_iterator)

    svr = SVR(C=10000)
    svr.fit(X_train, Y_train["Sand"])

    yhat_svr = svr.predict(X_test)
    test_error = math.sqrt(mean_squared_error(Y_test["Sand"], yhat_svr))
    return svr, test_error

Author: pkravik | Project: kaggle | Lines: 25 | Source: sand_models.py
Example 19: train_learning_model_svm

def train_learning_model_svm(df):
    X_all, y_all = preprocess_data(df)
    X_train, X_test, y_train, y_test = split_data(X_all, y_all)
    regressor = SVR()
    regressor.fit(X_train, y_train)
    calculate_results(regressor, X_train, X_test, y_train, y_test)

Author: longnd84 | Project: machine-learning | Lines: 7 | Source: trader_regressor.py
Example 20: CaSVRModel

def CaSVRModel(X_train, Y_train, X_test, Y_test, cv_iterator):
    #
    # param_grid = {'C':[10000],
    #               'epsilon':[0.001, 0.01, 0.05, 0.1, 0.15, 1]
    #               }
    #
    # svr = SVR(random_state=42, cache_size=1000, verbose=2)
    # search = GridSearchCV(svr, param_grid, scoring="mean_squared_error", n_jobs=1, iid=True, cv=cv_iterator)
    # search.fit(X_train, Y_train["Ca"])
    # #search.grid_scores_
    #
    # model = search.best_estimator_

    #scaler = StandardScaler()

    model = SVR(C=10000, epsilon=0.01, cache_size=1000)
    model.fit(X_train, Y_train["Ca"])
    #model.fit(X_train, Y_train["Ca"])
    #model.fit(X_train, Y_train["Ca"])
    #test = cross_val_score(svr, X_train.astype('float64'), Y_train["Ca"].astype('float64'), scoring="mean_squared_error", cv=cv_iterator)

    yhat_svr = model.predict(X_test)
    test_error = math.sqrt(mean_squared_error(Y_test["Ca"], yhat_svr))
    return model, test_error

Author: pkravik | Project: kaggle | Lines: 27 | Source: ca_models.py
Note: The sklearn.svm.SVR class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not republish this article without permission.