This article collects and summarizes typical usage examples of the Python class sklearn.neural_network.MLPRegressor. If you are unsure what MLPRegressor does or how to use it, the curated class examples below may help.
The following presents 20 code examples of the MLPRegressor class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
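Before the project-specific examples, here is a minimal, self-contained sketch of the fit/predict/score pattern they all build on. The synthetic dataset and parameter values are chosen only for illustration and do not come from any of the listed projects:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor

# Synthetic regression problem (illustrative only)
X, y = make_regression(n_samples=500, n_features=10, noise=0.1, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# One hidden layer with 50 units; 'adam' is the default solver
reg = MLPRegressor(hidden_layer_sizes=(50,), max_iter=1000, random_state=0)
reg.fit(X_train, y_train)

print("R^2 on test set:", reg.score(X_test, y_test))
print("First prediction:", reg.predict(X_test[:1]))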
Example 1: regression
def regression(N, P):
    assert len(N) == len(P)
    # One hidden layer of 15 units; 'solver' replaced the older 'algorithm' parameter name
    clf = MLPRegressor(hidden_layer_sizes=(15,), activation='relu', solver='adam', alpha=0.0001)
    clf.fit(N, P)
    return clf
Developer: JessMcintosh, Project: SonoGestures, Lines: 7, Source: NNRegCross.py
Example 2: _create_first_population
def _create_first_population(self):
    self._current_population = []
    for _ in range(self._n_individuals):
        mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture, alpha=10**-10, max_iter=1)
        # A single one-iteration fit on random data only serves to allocate coefs_/intercepts_
        mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
        mlp.out_activation_ = 'softmax'
        self._current_population.append([mlp, 0])
Developer: fritjofwolf, Project: RL2048, Lines: 7, Source: deepneuroevolution_bot.py
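Example 2 above (and Example 15 below) rely on a pattern worth noting: a single dummy fit call is used only to allocate the weight matrices, after which coefs_, intercepts_, and out_activation_ are overwritten by hand. A minimal sketch of that pattern follows; the layer sizes and mutation scale are assumptions for illustration, not values from the RL2048 project:

import numpy as np
from sklearn.neural_network import MLPRegressor

n_features, n_actions = 16, 4          # assumed sizes, for illustration
rng = np.random.default_rng(0)

# One-iteration fit on random data just allocates coefs_/intercepts_
# (a ConvergenceWarning is expected and harmless here)
mlp = MLPRegressor(hidden_layer_sizes=(32,), alpha=1e-10, max_iter=1)
mlp.fit(rng.standard_normal((1, n_features)), rng.standard_normal((1, n_actions)))

# Mutate the weights directly, as a neuroevolution step would
mlp.coefs_ = [w + 0.02 * rng.standard_normal(w.shape) for w in mlp.coefs_]
mlp.intercepts_ = [b + 0.02 * rng.standard_normal(b.shape) for b in mlp.intercepts_]

# Make predict() apply a softmax over the outputs (action probabilities)
mlp.out_activation_ = 'softmax'
print(mlp.predict(rng.standard_normal((1, n_features))))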
Example 3: construct_train
def construct_train(train_length, **kwargs):
    """
    Train and test the model with the given input
    window and number of neurons in the layer.
    """
    start_cur_position = 0
    steps, steplen = observations.size // (2 * train_length), train_length
    if 'hidden_layer' in kwargs:
        network = MLPRegressor(hidden_layer_sizes=kwargs['hidden_layer'])
    else:
        network = MLPRegressor()
    quality = []
    # fit model - configure parameters
    network.fit(observations[start_cur_position:train_length][:, 1].reshape(1, train_length),
                observations[:, 1][start_cur_position:train_length].reshape(1, train_length))
    parts = []
    # calculate predicted values
    # for each step add all predicted values to a list
    # TODO: add some parallelism here
    for i in range(0, steps):
        parts.append(network.predict(observations[start_cur_position:train_length][:, 1]))
        start_cur_position += steplen
        train_length += steplen
    # estimate model quality using mean squared error
    result = np.array(parts).flatten().tolist()
    for valnum, value in enumerate(result):
        quality.append((value - observations[valnum][1])**2)
    return sum(quality)/len(quality)
Developer: AntonKorobkov, Project: HW_3, Lines: 35, Source: homework_3_Korobkov.py
Example 4: mlp_bench
def mlp_bench(x_train, y_train, x_test, fh):
    """
    Forecasts using a simple MLP with 6 nodes in the hidden layer
    :param x_train: train input data
    :param y_train: target values for training
    :param x_test: test data
    :param fh: forecasting horizon
    :return:
    """
    y_hat_test = []
    model = MLPRegressor(hidden_layer_sizes=6, activation='identity', solver='adam',
                         max_iter=100, learning_rate='adaptive', learning_rate_init=0.001,
                         random_state=42)
    model.fit(x_train, y_train)
    last_prediction = model.predict(x_test)[0]
    # Recursive multi-step forecast: feed each prediction back into the input window
    for i in range(0, fh):
        y_hat_test.append(last_prediction)
        x_test[0] = np.roll(x_test[0], -1)
        x_test[0, (len(x_test[0]) - 1)] = last_prediction
        last_prediction = model.predict(x_test)[0]
    return np.asarray(y_hat_test)
Developer: KaterinaKou, Project: M4-methods, Lines: 25, Source: ML_benchmarks.py
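A hedged sketch of how a function like mlp_bench can be called. The toy series, window length, forecast horizon, and the make_windows helper below are assumptions for illustration, not part of the M4 benchmark code; mlp_bench (and its numpy/MLPRegressor imports) is assumed to be in scope from the example above:

import numpy as np

def make_windows(series, window):
    """Build lagged input windows X and one-step-ahead targets y."""
    X = np.array([series[i:i + window] for i in range(len(series) - window)])
    y = series[window:]
    return X, y

series = np.sin(np.linspace(0, 20, 200))       # toy series
window, fh = 12, 18                            # input window and forecast horizon

X, y = make_windows(series[:-fh], window)
x_test = series[-fh - window:-fh].reshape(1, -1).copy()  # last observed window

forecast = mlp_bench(X, y, x_test, fh)
print(forecast.shape)  # (18,)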
Example 5: test_multioutput_regression
def test_multioutput_regression():
    # Test that multi-output regression works as expected
    X, y = make_regression(n_samples=200, n_targets=5)
    mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
                       random_state=1)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.9)
Developer: aniryou, Project: scikit-learn, Lines: 7, Source: test_mlp.py
Example 6: test_lbfgs_regression
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        assert_greater(mlp.score(X, y), 0.95)
Developer: 0664j35t3r, Project: scikit-learn, Lines: 10, Source: test_mlp.py
Example 7: GetOptimalCLF2
def GetOptimalCLF2(train_x, train_y, rand_starts=8):
    '''
    Gets the optimal CLF function based on fixed settings

    Parameters
    ------------------------
    train_x - np.array
        Training feature vectors
    train_y - np.array
        Training label vectors
    rand_starts - int
        Number of random starts to do
        Default - 8 for 95% confidence and best 30%

    Returns
    ------------------------
    max_clf - sklearn function
        Optimal trained artificial neural network
    '''
    #### Get number of feature inputs of training vector
    n_input = train_x.shape[1]
    #### Set initial loss value
    min_loss = 1e10
    #### Perform number of trainings according to random start set
    for i in range(rand_starts):
        #### Print current status
        print("Iteration number {}".format(i+1))
        #### Initialize ANN network
        clf = MLPRegressor(hidden_layer_sizes=(int(round(2*np.sqrt(n_input), 0)), 1), activation='logistic', solver='sgd',
                           learning_rate='adaptive', max_iter=100000000, tol=1e-10,
                           early_stopping=True, validation_fraction=1/3.)
        #### Fit data
        clf.fit(train_x, train_y)
        #### Get current loss
        cur_loss = clf.loss_
        #### Save current clf if loss is minimum
        if cur_loss < min_loss:
            #### Set min_loss to a new value
            min_loss = cur_loss
            #### Set max_clf to new value
            max_clf = clf
    return max_clf
Developer: leolorenzoii, Project: Development-Codes, Lines: 53, Source: SubsurfacePredictionANN.py
Example 8: MLP_Regressor
def MLP_Regressor(train_x, train_y):
    clf = MLPRegressor(alpha=1e-05,
                       batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False,
                       epsilon=1e-08, hidden_layer_sizes=(8, 8), learning_rate='constant',
                       learning_rate_init=0.01, max_iter=500, momentum=0.9,
                       nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                       tol=0.0001, validation_fraction=0.1, verbose=False,
                       warm_start=False)
    clf.fit(train_x, train_y)
    #score = metrics.accuracy_score(clf.predict((train_x)), (train_y))
    #print(score)
    return clf
Developer: licheng5625, Project: coder, Lines: 13, Source: NNsklean_mult.py
Example 9: test_lbfgs_regression
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        if activation == 'identity':
            assert_greater(mlp.score(X, y), 0.84)
        else:
            # Non-linear models perform much better than a linear bottleneck:
            assert_greater(mlp.score(X, y), 0.95)
Developer: aniryou, Project: scikit-learn, Lines: 14, Source: test_mlp.py
Example 10: __init__
def __init__(self):
    self._nn = MLPRegressor(hidden_layer_sizes=(10,), verbose=False, warm_start=True)
    self._entradas_entrenamiento = []
    self._salidas_esperadas_entrenamiento = []
    # TD-lambda parameter
    self.lambdaCoefficient = 0.9
Developer: gsiriani, Project: MAA, Lines: 7, Source: JugadorGrupo3.py
Example 11: __init__
def __init__(self, num_inputs, num_outputs):
    self.nx = num_inputs
    self.ny = num_outputs
    self.net = MLPRegressor(hidden_layer_sizes=(50, 10),
                            max_iter=1,
                            solver='sgd',
                            learning_rate='constant',
                            learning_rate_init=0.001,
                            warm_start=True,
                            momentum=0.9,
                            nesterovs_momentum=True
                            )
    self.initialize_network()

    # set experience replay
    self.mbsize = 128  # mini-batch size
    self.er_s = []
    self.er_a = []
    self.er_r = []
    self.er_done = []
    self.er_sp = []
    self.er_size = 2000  # total size of the replay buffer, implemented as a queue
    self.whead = 0  # write head
Developer: aravindr93, Project: RL-tasks, Lines: 25, Source: play_agent.py
Example 12: train_model
def train_model(x_train, y_train, alpha=1e-3, hid_layers=[512], max_iter=100):
    """
    Train model on training data.
    :param x_train: training examples
    :param y_train: target variables
    :param alpha: L2 regularization coefficient
    :param hid_layers: hidden layer sizes
    :param max_iter: maximum number of iterations in L-BFGS optimization
    :return: a model trained with a neural network
    """
    nn_model = MLPRegressor(solver='lbfgs', hidden_layer_sizes=hid_layers,
                            alpha=alpha, max_iter=max_iter,
                            activation="relu", random_state=1)
    nn_model.fit(x_train, y_train)
    return nn_model
Developer: minhitbk, Project: data-science, Lines: 16, Source: ETL_Modeling.py
Example 13: train
def train(self):
    print("DEB Training with TSnew")
    self.MLP = MLPRegressor(activation='relu', alpha=1e-05, batch_size='auto', beta_1=0.9,
                            beta_2=0.999, early_stopping=False, epsilon=1e-08,
                            hidden_layer_sizes=len(self.TSnew_Y.columns), learning_rate='constant',
                            learning_rate_init=0.001, max_iter=200, momentum=0.9,
                            nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                            solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
                            warm_start=False)
    self.MLP.fit(self.TSnew_X, self.TSnew_Y)
Developer: HerrAugust, Project: EserciziUni, Lines: 10, Source: NeuralNetwork.py
Example 14: __init__
class Ann:
    def __init__(self):
        self._nn = MLPRegressor(hidden_layer_sizes=(10,), verbose=False, warm_start=True)
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []
        self.lambdaCoefficient = 0.9

    def evaluar(self, entrada):
        return self._nn.predict(entrada)

    def agregar_a_entrenamiento(self, tableros, resultado):
        tableros.reverse()
        for i in range(len(tableros)):
            tablero, valorEstimado = tableros[i][0], tableros[i][1]
            self._entradas_entrenamiento.append(tablero)
            # Note: the 'or True' makes this branch always taken, so the
            # TD(lambda) update in the else branch is effectively disabled.
            if i == 0 or True:
                self._salidas_esperadas_entrenamiento.append(resultado.value)
            else:
                valorAAprender = valorEstimado + self.lambdaCoefficient * (
                    self._salidas_esperadas_entrenamiento[i-1] - valorEstimado)
                self._salidas_esperadas_entrenamiento.append(valorAAprender)

    def entrenar(self):
        self._nn.partial_fit(self._entradas_entrenamiento, self._salidas_esperadas_entrenamiento)
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []

    def almacenar(self):
        pickle.dump(self._nn, open(self.path, 'wb'))

    def cargar(self, path, red):
        self.path = path
        if os.path.isfile(path):
            self._nn = pickle.load(open(path, 'rb'))
        else:
            self._nn = red
            tableroVacio = ([EnumCasilla.EMPTY.value for _ in range(64)], 0)
            self.agregar_a_entrenamiento([tableroVacio], EnumResultado.EMPATE)
            self.entrenar()
Developer: gsiriani, Project: MAA, Lines: 42, Source: JugadorGrupoSimple-no-usar.py
Example 15: _create_new_nn
def _create_new_nn(self, weights, biases):
    mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture, alpha=10**-10, max_iter=1)
    mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
    # Overwrite the freshly initialized parameters with the supplied ones
    mlp.coefs_ = weights
    mlp.intercepts_ = biases
    mlp.out_activation_ = 'softmax'
    return mlp
Developer: fritjofwolf, Project: RL2048, Lines: 7, Source: deepneuroevolution_bot.py
Example 16: test_partial_fit_regression
def test_partial_fit_regression():
    # Test partial_fit on regression.
    # `partial_fit` should yield the same results as 'fit' for regression.
    X = Xboston
    y = yboston

    for momentum in [0, .9]:
        mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
                           random_state=1, learning_rate_init=0.01,
                           batch_size=X.shape[0], momentum=momentum)
        with warnings.catch_warnings(record=True):
            # catch convergence warning
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPRegressor(solver='sgd', activation='relu',
                           learning_rate_init=0.01, random_state=1,
                           batch_size=X.shape[0], momentum=momentum)
        for i in range(100):
            mlp.partial_fit(X, y)

        pred2 = mlp.predict(X)
        assert_almost_equal(pred1, pred2, decimal=2)
        score = mlp.score(X, y)
        assert_greater(score, 0.75)
Developer: aniryou, Project: scikit-learn, Lines: 24, Source: test_mlp.py
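For reference, a minimal sketch (not from the scikit-learn test suite) of the same partial_fit API used on streaming mini-batches; the synthetic linear target and batch size below are assumptions for illustration:

import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.RandomState(0)
mlp = MLPRegressor(solver='sgd', learning_rate_init=0.01, random_state=1)

# Feed the model one mini-batch at a time, e.g. from a data stream
for _ in range(200):
    X_batch = rng.randn(32, 5)
    y_batch = X_batch @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + 0.1 * rng.randn(32)
    mlp.partial_fit(X_batch, y_batch)

print(mlp.predict(rng.randn(3, 5)))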
Example 17: main
def main():
    cal_housing = fetch_california_housing()
    X, y = cal_housing.data, cal_housing.target
    names = cal_housing.feature_names

    # Center target to avoid gradient boosting init bias: gradient boosting
    # with the 'recursion' method does not account for the initial estimator
    # (here the average target, by default)
    y -= y.mean()

    print("Training MLPRegressor...")
    est = MLPRegressor(activation='logistic')
    est.fit(X, y)
    print('Computing partial dependence plots...')
    # We don't compute the 2-way PDP (5, 1) here, because it is a lot slower
    # with the brute method.
    features = [0, 5, 1, 2]
    plot_partial_dependence(est, X, features, feature_names=names,
                            n_jobs=3, grid_resolution=50)
    fig = plt.gcf()
    fig.suptitle('Partial dependence of house value on non-location features\n'
                 'for the California housing dataset, with MLPRegressor')
    plt.subplots_adjust(top=0.9)  # tight_layout causes overlap with suptitle

    print("Training GradientBoostingRegressor...")
    est = GradientBoostingRegressor(n_estimators=100, max_depth=4,
                                    learning_rate=0.1, loss='huber',
                                    random_state=1)
    est.fit(X, y)
    print('Computing partial dependence plots...')
    features = [0, 5, 1, 2, (5, 1)]
    plot_partial_dependence(est, X, features, feature_names=names,
                            n_jobs=3, grid_resolution=50)
    fig = plt.gcf()
    fig.suptitle('Partial dependence of house value on non-location features\n'
                 'for the California housing dataset, with Gradient Boosting')
    plt.subplots_adjust(top=0.9)

    print('Custom 3d plot via ``partial_dependence``')
    fig = plt.figure()

    target_feature = (1, 5)
    pdp, axes = partial_dependence(est, X, target_feature,
                                   grid_resolution=50)
    XX, YY = np.meshgrid(axes[0], axes[1])
    Z = pdp[0].T
    ax = Axes3D(fig)
    surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1,
                           cmap=plt.cm.BuPu, edgecolor='k')
    ax.set_xlabel(names[target_feature[0]])
    ax.set_ylabel(names[target_feature[1]])
    ax.set_zlabel('Partial dependence')
    # pretty init view
    ax.view_init(elev=22, azim=122)
    plt.colorbar(surf)
    plt.suptitle('Partial dependence of house value on median\n'
                 'age and average occupancy, with Gradient Boosting')
    plt.subplots_adjust(top=0.9)
    plt.show()
Developer: daniel-perry, Project: scikit-learn, Lines: 61, Source: plot_partial_dependence.py
Example 18: getKaggleMNIST
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
from sklearn.neural_network import MLPRegressor
from util import getKaggleMNIST
# get data
X, _, Xt, _ = getKaggleMNIST()
# create the model and train it
model = MLPRegressor()
model.fit(X, X)
# test the model
print("Train R^2:", model.score(X, X))
print("Test R^2:", model.score(Xt, Xt))
Xhat = model.predict(X)
mse = ((Xhat - X)**2).mean()
print("Train MSE:", mse)
Xhat = model.predict(Xt)
mse = ((Xhat - Xt)**2).mean()
print("Test MSE:", mse)
Developer: lazyprogrammer, Project: machine_learning_examples, Lines: 31, Source: sk_mlp.py
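The same X-to-X "autoencoder" idea can be reproduced on a dataset that ships with scikit-learn. This sketch swaps the project-specific getKaggleMNIST loader for load_digits; the hidden layer size and iteration count are illustrative assumptions:

import numpy as np
from sklearn.datasets import load_digits
from sklearn.neural_network import MLPRegressor

# Reconstruct the inputs from themselves: a crude autoencoder
X = load_digits().data / 16.0          # scale pixel values to [0, 1]
ae = MLPRegressor(hidden_layer_sizes=(32,), max_iter=500, random_state=0)
ae.fit(X, X)                           # may emit a ConvergenceWarning

X_hat = ae.predict(X)
print("Reconstruction MSE:", ((X_hat - X) ** 2).mean())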
Example 19: MLPRegressor
geno = np.load('genodata.npy')
pheno = np.load('phenodata.npy')
X_tr = geno[:1000,1:] #slicing geno
#X_va = geno[201:250,:]
X_te = geno[1001:,1:]
Y_tr = pheno[:1000,1:] #slicing pheno
#Y_va = pheno[201:250,:]
Y_te = pheno[1001:,1:]
diabetes_X_train = X_tr
diabetes_X_test = X_te
diabetes_y_train = Y_tr
diabetes_y_test = Y_te
reg = MLPRegressor(hidden_layer_sizes=(1,), solver='lbfgs')
reg.fit(X_tr,Y_tr)
scores = cross_val_score(reg,geno[:,1:],pheno[:,1:],cv=10)
#Result_Y = np.zeros((249,1), dtype='float64')
Result_Y = reg.predict(X_te)
#Yte = np.array(Y_te, dtype=np.float64)
r_row,p_score = pearsonr(Result_Y,Y_te)
# The mean square error
print("Residual sum of squares: %.2f"
% np.mean((reg.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % reg.score(diabetes_X_test, diabetes_y_test))
print(Result_Y)
Developer: godisboy, Project: SNP-deep-learning, Lines: 31, Source: GBLUP.py
Example 20: get_stacking_model
def get_stacking_model():
    model = MLPRegressor(hidden_layer_sizes=(20, 20))
    X_train, y_train, _, _ = get_data()
    model.fit(X_train, y_train)
    return model
Developer: theidentity, Project: Ensembling_Techniques, Lines: 5, Source: regression_ensemble.py
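The function above returns a fitted MLP to be blended later. For context, here is a hedged sketch of how an MLPRegressor commonly slots into a stacking ensemble with scikit-learn's StackingRegressor; this is generic illustration on synthetic data, not code from the Ensembling_Techniques project:

from sklearn.datasets import make_regression
from sklearn.ensemble import StackingRegressor, RandomForestRegressor
from sklearn.linear_model import RidgeCV
from sklearn.neural_network import MLPRegressor

X, y = make_regression(n_samples=400, n_features=8, noise=0.2, random_state=0)

# MLP and random forest as base learners, ridge regression as the meta-learner
stack = StackingRegressor(
    estimators=[
        ('mlp', MLPRegressor(hidden_layer_sizes=(20, 20), max_iter=2000, random_state=0)),
        ('rf', RandomForestRegressor(n_estimators=100, random_state=0)),
    ],
    final_estimator=RidgeCV(),
)
stack.fit(X, y)
print("Stacked R^2:", stack.score(X, y))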
Note: The sklearn.neural_network.MLPRegressor class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.