本文整理汇总了Python中nolearn.lasagne.NeuralNet类的典型用法代码示例。如果您正苦于以下问题:Python NeuralNet类的具体用法?Python NeuralNet怎么用?Python NeuralNet使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了NeuralNet类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: lasagne_oneLayer_classifier
def lasagne_oneLayer_classifier(param, X, labels):
    """Train a single-hidden-layer softmax classifier with nolearn.

    param -- dict of hyper-parameters: 'num_features',
        'dense0_num_units', 'dropout_p', 'num_classes',
        'update_learning_rate', 'update_momentum', 'max_epochs'.
    X, labels -- training matrix and class labels.
    Returns the fitted NeuralNet.
    """
    # input -> dense -> dropout -> softmax output
    architecture = [
        ('input', InputLayer),
        ('dense0', DenseLayer),
        ('dropout', DropoutLayer),
        ('output', DenseLayer),
    ]
    settings = dict(
        layers=architecture,
        input_shape=(None, param['num_features']),
        dense0_num_units=param['dense0_num_units'],
        dropout_p=param['dropout_p'],
        output_num_units=param['num_classes'],
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        update_learning_rate=param['update_learning_rate'],
        update_momentum=param['update_momentum'],
        eval_size=0.02,  # hold out 2% for validation monitoring
        verbose=1,
        max_epochs=param['max_epochs'],
    )
    classifier = NeuralNet(**settings)
    # fit on the full data and hand the trained model back
    classifier.fit(X, labels)
    return classifier
开发者ID:huanqi,项目名称:Otto_Group_Competition,代码行数:29,代码来源:classifier.py
示例2: fit_nn_and_predict_probas
def fit_nn_and_predict_probas(features, dv, features_t):
    """Fit a two-hidden-layer net, restore the best epoch's weights, and
    return class probabilities for the test matrix ``features_t``.

    NOTE(review): columns [:23] of features_t are transformed with the
    scaler fitted inside TrainValidSplitter — this assumes the first 23
    columns are the continuous features; confirm against the caller.
    """
    weight_tracker = BestWeightsHolder()
    splitter = TrainValidSplitter(standardize=True,few=True)
    architecture = [
        ('input', InputLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('output', DenseLayer),
    ]
    model = NeuralNet(
        layers=architecture,
        input_shape=(None, features.shape[1]),
        dense0_num_units=512,
        dropout0_p=0.4,
        dense1_num_units=256,
        dropout1_p=0.4,
        output_num_units=38,  # 38 output classes
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=0.02,
        train_split=splitter,
        verbose=1,
        max_epochs=40,
        # snapshot the weights of the best validation epoch
        on_epoch_finished=[weight_tracker.hold_best_weights],
    )
    fitted = model.fit(features, dv)
    # roll back to the best epoch before predicting
    fitted.load_params_from(weight_tracker.best_weights)
    scaled_block = splitter.standa.transform(features_t[:,:23])
    test_matrix = np.hstack((scaled_block, features_t[:,23:]))
    return fitted.predict_proba(test_matrix)
开发者ID:matchado,项目名称:WalmartTripType,代码行数:29,代码来源:train_and_predict.py
示例3: fit_model
def fit_model(train_x, y, test_x):
    """Feed forward neural network for kaggle digit recognizer competition.

    Intentionally limit network size and optimization time (small
    max_epochs) to meet runtime restrictions.  Returns predictions for
    ``test_x``.
    """
    print("\n\nRunning Convetional Net. Optimization progress below\n\n")
    architecture = [
        ('input', layers.InputLayer),
        ('hidden1', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ]
    network = NeuralNet(
        layers=architecture,
        # layer parameters:
        input_shape=(None, train_x.shape[1]),
        hidden1_num_units=200,
        hidden1_nonlinearity=rectify,  # ReLU hidden layer
        output_nonlinearity=softmax,   # softmax for classification problems
        output_num_units=10,           # 10 target values (digits 0-9)
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.05,
        update_momentum=0.7,
        regression=False,
        max_epochs=10,  # intentionally limited for execution speed
        verbose=1,
    )
    network.fit(train_x, y)
    return network.predict(test_x)
开发者ID:huanqi,项目名称:Otto_Group_Competition,代码行数:31,代码来源:NN_Lasagne_Example_2.py
示例4: test_diamond
def test_diamond(self, NeuralNet):
    """Diamond topology (input feeding two parallel hidden layers that
    merge in a concat layer) is wired up by ``initialize_layers``:
    string layer names given in ``*_incoming`` kwargs must be resolved
    to the corresponding constructed layer instances.

    NOTE(review): this variant uses the singular ``concat_incoming``
    keyword and expects ``incoming=[...]``; the sibling test below uses
    the plural ``incomings`` form — confirm which nolearn version each
    targets.
    """
    # plain Mocks stand in for the five layer factories
    input, hidden1, hidden2, concat, output = (
        Mock(), Mock(), Mock(), Mock(), Mock())
    nn = NeuralNet(
        layers=[
            ('input', input),
            ('hidden1', hidden1),
            ('hidden2', hidden2),
            ('concat', concat),
            ('output', output),
        ],
        input_shape=(10, 10),
        # route 'input' directly into hidden2, bypassing hidden1
        hidden2_incoming='input',
        concat_incoming=['hidden1', 'hidden2'],
    )
    nn.initialize_layers(nn.layers)
    # each factory must receive its resolved incoming layer instance(s)
    input.assert_called_with(name='input', shape=(10, 10))
    hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
    hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
    concat.assert_called_with(
        incoming=[hidden1.return_value, hidden2.return_value],
        name='concat'
    )
    output.assert_called_with(incoming=concat.return_value, name='output')
开发者ID:alobrix,项目名称:Deep-Learning,代码行数:25,代码来源:test_lasagne.py
示例5: _create_nnet
def _create_nnet(self, input_dims, output_dims, learning_rate, num_hidden_units=15, batch_size=32, max_train_epochs=1,
                 hidden_nonlinearity=nonlinearities.rectify, output_nonlinearity=None, update_method=updates.sgd):
    """Build and compile a one-hidden-layer regression network.

    A subclass may override this if a different sort of network is
    desired.  The net is initialized (compiled) but not fitted.
    """
    topology = [
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ]
    network = NeuralNet(
        layers=topology,
        # layer parameters:
        input_shape=(None, input_dims),
        hidden_num_units=num_hidden_units,
        hidden_nonlinearity=hidden_nonlinearity,
        output_nonlinearity=output_nonlinearity,
        output_num_units=output_dims,
        # optimization method:
        update=update_method,
        update_learning_rate=learning_rate,
        regression=True,  # flag to indicate we're dealing with regression problem
        max_epochs=max_train_epochs,
        batch_iterator_train=BatchIterator(batch_size=batch_size),
        # eval_size=0: train on all data, no validation split
        train_split=nolearn.lasagne.TrainSplit(eval_size=0),
        verbose=0,
    )
    # compile the theano functions up front without fitting
    network.initialize()
    return network
开发者ID:rihardsk,项目名称:predictive-rl,代码行数:28,代码来源:cacla_agent_nolearn.py
示例6: train
def train():
    """Assemble the West Nile Virus training set, fit a two-hidden-layer
    net with a decaying learning rate, print validation ROC AUC, and
    return (net, mean, std) where mean/std are the normalization
    statistics needed to transform future inputs.
    """
    weather = load_weather()
    training = load_training()
    X = assemble_X(training, weather)
    print len(X[0])  # Python 2 print statement: feature count
    mean, std = normalize(X)
    y = assemble_y(training)
    input_size = len(X[0])
    # shared variable so AdjustVariable can decay it between epochs
    learning_rate = theano.shared(np.float32(0.1))
    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('output', DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, input_size),
        hidden1_num_units=325,
        dropout1_p=0.4,
        hidden2_num_units=325,
        dropout2_p=0.4,
        output_nonlinearity=sigmoid,  # single sigmoid unit: binary probability
        output_num_units=1,
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=learning_rate,
        update_momentum=0.9,
        # Decay the learning rate
        on_epoch_finished=[
            AdjustVariable(learning_rate, target=0, half_life=1),
        ],
        # This is silly, but we don't want a stratified K-Fold here
        # To compensate we need to pass in the y_tensor_type and the loss.
        regression=True,
        y_tensor_type = T.imatrix,
        objective_loss_function = binary_crossentropy,
        max_epochs=85,
        eval_size=0.1,
        verbose=1,
    )
    X, y = shuffle(X, y, random_state=123)
    net.fit(X, y)
    # re-derive the validation split the net used, to score on it
    # (legacy nolearn API: train_test_split + eval_size attribute)
    _, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
    probas = net.predict_proba(X_valid)[:,0]
    print("ROC score", metrics.roc_auc_score(y_valid, probas))
    return net, mean, std
开发者ID:kaiwang0112006,项目名称:mykaggle_westnile,代码行数:60,代码来源:SimpleLasagneNN.py
示例7: train
def train(x_train, y_train):
    """Fit a softmax classifier with two 100-unit hidden layers and
    return the trained net."""
    topology = [
        ('input', layers.InputLayer),
        ('hidden1', layers.DenseLayer),
        ('hidden2', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ]
    clf_nn = NeuralNet(
        layers=topology,
        # layer parameters:
        input_shape=(None, 2538),  # 2538 input features per example
        hidden1_num_units=100,
        hidden2_num_units=100,
        output_nonlinearity=nonlinearities.softmax,
        output_num_units=10,  # 10 target classes
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=50,
        verbose=1,
    )
    clf_nn.fit(x_train, y_train)
    return clf_nn
开发者ID:YilinGUO,项目名称:NLP,代码行数:25,代码来源:cw.py
示例8: CompileNetwork
def CompileNetwork(l_out, epochs, update, update_learning_rate, objective_l2,
                   earlystopping, patience, batch_size, verbose):
    """Wrap an already-built lasagne output layer in a nolearn NeuralNet.

    l_out -- the lasagne output layer of a hand-built stack.
    update -- *name* of an update rule in ``updates`` (resolved with
        getattr).
    earlystopping -- when truthy, register an EarlyStopping callback.
    update_learning_rate -- when not None, overrides the net's default
        learning rate after construction.
    Returns the un-fitted NeuralNet.
    """
    update_fn = getattr(updates, update)
    earlystop = EarlyStopping(patience=patience, verbose=verbose)
    net = NeuralNet(
        l_out,
        max_epochs=epochs,
        update=update_fn,
        objective_l2=objective_l2,
        batch_iterator_train=BatchIterator(batch_size=batch_size),
        batch_iterator_test=BatchIterator(batch_size=batch_size),
        verbose=verbose,
        # restore the best weights seen once training finishes
        on_training_finished=[earlystop.load_best_weights],
    )
    # idiomatic truth test instead of the original `== True` comparison
    if earlystopping:
        net.on_epoch_finished.append(earlystop)
    if update_learning_rate is not None:
        net.update_learning_rate = update_learning_rate
    return net
开发者ID:jacobzweig,项目名称:RCNN_Toolbox,代码行数:27,代码来源:RCNN.py
示例9: test_initialization_with_tuples
def test_initialization_with_tuples(self, NeuralNet):
    """Layers given as ``(factory, kwargs)`` tuples are instantiated in
    list order, auto-named ("mock1", "mock2") when no name is supplied,
    and per-layer parameters can also be passed as ``<name>_<param>``
    keyword arguments on the net itself.
    """
    # Mocks advertise a base class so nolearn can distinguish the
    # input layer from ordinary layers.
    input = Mock(__name__="InputLayer", __bases__=(InputLayer,))
    hidden1, hidden2, output = [Mock(__name__="MockLayer", __bases__=(Layer,)) for i in range(3)]
    nn = NeuralNet(
        layers=[
            (input, {"shape": (10, 10), "name": "input"}),
            (hidden1, {"some": "param", "another": "param"}),
            (hidden2, {}),
            (output, {"name": "output"}),
        ],
        input_shape=(10, 10),
        # net-level kwarg overrides the tuple's "some" for layer "mock1"
        mock1_some="iwin",
    )
    out = nn.initialize_layers(nn.layers)
    input.assert_called_with(name="input", shape=(10, 10))
    assert nn.layers_["input"] is input.return_value
    # "some" was overridden by mock1_some; "another" kept from the tuple
    hidden1.assert_called_with(incoming=input.return_value, name="mock1", some="iwin", another="param")
    assert nn.layers_["mock1"] is hidden1.return_value
    hidden2.assert_called_with(incoming=hidden1.return_value, name="mock2")
    assert nn.layers_["mock2"] is hidden2.return_value
    output.assert_called_with(incoming=hidden2.return_value, name="output")
    # initialize_layers returns the final (output) layer instance here
    assert out is nn.layers_["output"]
开发者ID:buyijie,项目名称:nolearn,代码行数:27,代码来源:test_base.py
示例10: train_net
def train_net(X, y):
    """Fit a regression net (custom NCAALayer + one dense layer) that
    outputs a sigmoid win probability, and return the fitted net."""
    stack = [
        ('input', layers.InputLayer),
        ('ncaa', NCAALayer),
        ('dropout1', layers.DropoutLayer),
        ('hidden', layers.DenseLayer),
        ('dropout2', layers.DropoutLayer),
        ('output', layers.DenseLayer),
    ]
    net2 = NeuralNet(
        layers=stack,
        # both teams' feature vectors are concatenated side by side
        input_shape=(None, num_features * 2),
        ncaa_num_units=128,
        dropout1_p=0.2,
        hidden_num_units=128,
        dropout2_p=0.3,
        output_num_units=1,
        output_nonlinearity=nonlinearities.sigmoid,  # win probability
        update=nesterov_momentum,
        # shared variables so the schedule can be adjusted at runtime
        update_learning_rate=theano.shared(float32(0.01)),
        update_momentum=theano.shared(float32(0.9)),
        regression=True,  # flag to indicate we're dealing with regression problem
        max_epochs=20,
        verbose=1,
    )
    net2.fit(X, y)
    return net2
开发者ID:stonezyl,项目名称:march-ml-mania-2015,代码行数:29,代码来源:model.py
示例11: train_network
def train_network():
    """Train a three-hidden-layer softmax net on the module-level X/y
    with annealed learning rate/momentum and early stopping.

    Returns ``(best_validation_loss, fitted_net)``.
    """
    architecture = [
        ('input', InputLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('output', DenseLayer),
    ]
    es = EarlyStopping(patience=200)
    # anneal learning rate down and momentum up as training progresses,
    # then let early stopping cut training short
    epoch_callbacks = [
        AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
        AdjustVariable('update_momentum', start=0.9, stop=0.999),
        es,
    ]
    net0 = NeuralNet(
        layers=architecture,
        input_shape=(None, num_features),
        dense0_num_units=256,
        dropout0_p=0.5,
        dense1_num_units=128,
        dropout1_p=0.5,
        dense2_num_units=64,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        # shared variables so AdjustVariable can mutate them per epoch
        update_learning_rate=theano.shared(float32(0.01)),
        update_momentum=theano.shared(float32(0.9)),
        eval_size=0.2,
        verbose=1,
        max_epochs=1000,
        on_epoch_finished=epoch_callbacks,
    )
    net0.fit(X, y)
    return (es.best_valid, net0)
开发者ID:Adri96,项目名称:aifh,代码行数:35,代码来源:example_otto.py
示例12: test_initialization_legacy
def test_initialization_legacy(self, NeuralNet):
    """Legacy ``('name', factory)`` layer tuples are chained in list
    order, with per-layer parameters supplied as ``<layername>_<param>``
    keyword arguments on the net.
    """
    # Mocks advertise a base class so nolearn can identify the input layer
    input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    hidden1, hidden2, output = [
        Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
    nn = NeuralNet(
        layers=[
            ('input', input),
            ('hidden1', hidden1),
            ('hidden2', hidden2),
            ('output', output),
        ],
        input_shape=(10, 10),
        # routed to the 'hidden1' layer factory as some='param'
        hidden1_some='param',
    )
    out = nn.initialize_layers(nn.layers)
    input.assert_called_with(
        name='input', shape=(10, 10))
    assert nn.layers_['input'] is input.return_value
    hidden1.assert_called_with(
        incoming=input.return_value, name='hidden1', some='param')
    assert nn.layers_['hidden1'] is hidden1.return_value
    hidden2.assert_called_with(
        incoming=hidden1.return_value, name='hidden2')
    assert nn.layers_['hidden2'] is hidden2.return_value
    output.assert_called_with(
        incoming=hidden2.return_value, name='output')
    # initialize_layers returns a sequence here; out[0] is the output layer
    assert out[0] is nn.layers_['output']
开发者ID:dnouri,项目名称:nolearn,代码行数:32,代码来源:test_base.py
示例13: test_diamond
def test_diamond(self, NeuralNet):
    """A diamond topology (input -> hidden1/hidden2 -> concat -> output)
    is wired correctly: string layer names in ``*_incoming``/
    ``*_incomings`` kwargs are resolved to the corresponding
    constructed layer instances.
    """
    # Mocks advertise a base class so nolearn can identify the input layer
    input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    hidden1, hidden2, concat, output = [
        Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
    nn = NeuralNet(
        layers=[
            ('input', input),
            ('hidden1', hidden1),
            ('hidden2', hidden2),
            ('concat', concat),
            ('output', output),
        ],
        input_shape=(10, 10),
        # hidden2 takes 'input' directly instead of the preceding layer
        hidden2_incoming='input',
        # concat merges both branches (plural 'incomings' keyword)
        concat_incomings=['hidden1', 'hidden2'],
    )
    nn.initialize_layers(nn.layers)
    # each factory must receive its resolved incoming layer instance(s)
    input.assert_called_with(name='input', shape=(10, 10))
    hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
    hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
    concat.assert_called_with(
        incomings=[hidden1.return_value, hidden2.return_value],
        name='concat'
    )
    output.assert_called_with(incoming=concat.return_value, name='output')
开发者ID:dnouri,项目名称:nolearn,代码行数:26,代码来源:test_base.py
示例14: fit
def fit(xTrain, yTrain, dense0_num=800, dropout_p=0.5, dense1_num=500, update_learning_rate=0.01,
        update_momentum=0.9, test_ratio=0.2, max_epochs=20):
    """Train a two-hidden-layer softmax net on Otto data (9 classes),
    print the training-set log-loss, and return the fitted classifier.
    """
    #update_momentum=0.9, test_ratio=0.2, max_epochs=20, train_fname='train.csv'):
    #xTrain, yTrain, encoder, scaler = load_train_data(train_fname)
    #xTest, ids = load_test_data('test.csv', scaler)
    num_features = len(xTrain[0,:])
    num_classes = 9  # Otto competition: fixed set of 9 product classes
    print num_features  # Python 2 print statement
    layers0 = [('input', InputLayer),
               ('dense0', DenseLayer),
               ('dropout', DropoutLayer),
               ('dense1', DenseLayer),
               ('output', DenseLayer)]
    clf = NeuralNet(layers=layers0,
                    input_shape=(None, num_features),
                    dense0_num_units=dense0_num,
                    dropout_p=dropout_p,
                    dense1_num_units=dense1_num,
                    output_num_units=num_classes,
                    output_nonlinearity=softmax,
                    update=nesterov_momentum,
                    update_learning_rate=update_learning_rate,
                    update_momentum=update_momentum,
                    eval_size=test_ratio,  # fraction held out for validation
                    verbose=1,
                    max_epochs=max_epochs)
    clf.fit(xTrain, yTrain)
    # training-set log-loss (optimistic; validation loss is in the fit log)
    ll_train = metrics.log_loss(yTrain, clf.predict_proba(xTrain))
    print ll_train  # Python 2 print statement
    return clf
开发者ID:qi-feng,项目名称:ClassificationUsingScikitLearn,代码行数:35,代码来源:nn_otto_ensemble_v8.6.py
示例15: neural_network
def neural_network(x_train, y_train):
    """Encode and scale the training data, fit a two-hidden-layer
    adagrad net with input dropout, and return (fitted net, scaler)."""
    X, y, encoder, scaler = load_train_data(x_train, y_train)
    num_classes = len(encoder.classes_)
    num_features = X.shape[1]
    # input dropout, then two dense+dropout stages, then softmax output
    stages = [
        ("input", InputLayer),
        ("dropoutf", DropoutLayer),
        ("dense0", DenseLayer),
        ("dropout", DropoutLayer),
        ("dense1", DenseLayer),
        ("dropout2", DropoutLayer),
        ("output", DenseLayer),
    ]
    hyper = dict(
        layers=stages,
        input_shape=(None, num_features),
        dropoutf_p=0.15,
        dense0_num_units=1000,
        dropout_p=0.25,
        dense1_num_units=500,
        dropout2_p=0.25,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=0.005,
        eval_size=0.01,  # tiny validation split
        verbose=1,
        max_epochs=30,
    )
    net0 = NeuralNet(**hyper)
    net0.fit(X, y)
    return (net0, scaler)
开发者ID:ctozlm,项目名称:KDDCUP15,代码行数:31,代码来源:kddcup15.py
示例16: fit
def fit(self,tr,add_feat_tr):
    """Optionally detrend the series, build windowed features, and fit a
    regression net.  Returns ``(net, features, targets)``.
    """
    ## if trend exists, remove trend
    if self.trend ==1:
        trend = self.est_trend(tr)
        tr = tr-np.asarray(trend)
    layers0=[
        ## 2 layers with one hidden layer
        (InputLayer, {'shape': (None,8,self.window_length)}),
        (DenseLayer, {'num_units': 8*self.window_length}),
        (DropoutLayer, {'p':0.3}),
        # NOTE(review): Python-2 integer division assumed here; under
        # Python 3 this yields a float and would break num_units.
        (DenseLayer, {'num_units': 8*self.window_length/3}),
        ## the output layer
        (DenseLayer, {'num_units': 1, 'nonlinearity': None}),
    ]
    feats = build_feat(tr, add_feat_tr, window_length=self.window_length)
    print feats.shape  # Python 2 print statement
    feat_target = get_target(tr,window_length=self.window_length)
    print feat_target.shape  # Python 2 print statement
    net0 = NeuralNet(
        layers=layers0,
        max_epochs=400,
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        verbose=1,
        regression=True,
    )
    # presumably the last window has no target — confirm the
    # build_feat/get_target alignment
    net0.fit(feats[:-1],feat_target)
    return net0,feats,feat_target
开发者ID:aubreychen9012,项目名称:signal-interpolation,代码行数:29,代码来源:interpolator.py
示例17: createNet
def createNet(X, Y, ln, loadFile = ""):
    """Build a four-hidden-layer (400 units each) regression network.

    X, Y are currently unused here (kept for interface compatibility);
    ln is the learning rate.  loadFile is accepted but parameter
    restoring is disabled (see commented-out code).  Returns the
    un-fitted net.
    """
    net1 = NeuralNet(
        layers=[  # four hidden layers plus input/output
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters (best found: 400/400):
        input_shape=(None, numInputs),
        hidden_num_units=400,
        hidden1_num_units=400,
        hidden2_num_units=400,
        hidden3_num_units=400,
        output_nonlinearity=None,      # identity output for regression
        output_num_units=numOutputs,
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=ln,
        update_momentum=0.9,
        regression=True,  # flag to indicate we're dealing with regression problem
        # the original set max_epochs=1500 and then immediately
        # overwrote it with 50 after construction; set the effective
        # value directly instead
        max_epochs=50,
        verbose=1,
    )
    # parameter loading intentionally disabled:
    # if loadFile != "":
    #     net1.load_params_from(loadFile)
    return net1
开发者ID:tmoldwin,项目名称:NNGen,代码行数:34,代码来源:Lasagne.py
示例18: loadNet
def loadNet(netName):
    """Return a trained facial-keypoints net, unpickling it from
    ``netName`` if the file exists, otherwise training from scratch and
    caching the result to that file.

    NOTE(review): ``pickle.load`` on an arbitrary file is unsafe for
    untrusted input.
    """
    if os.path.exists(netName):
        # cached model available — skip training entirely
        return pickle.load(open(netName, "rb"))
    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 9216),  # 96x96 input pixels per batch
        hidden_num_units=100,
        output_nonlinearity=None,  # identity output for regression
        output_num_units=30,       # 30 target keypoint coordinates
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=True,
        max_epochs=400,
        verbose=1,
    )
    X, y = load()
    net.fit(X, y)
    print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(X.shape, X.min(), X.max()))
    print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(y.shape, y.min(), y.max()))
    pickle.dump(net, open(netName, 'wb'), -1)
    return net
开发者ID:kanak87,项目名称:oldboy_rep,代码行数:35,代码来源:nn.py
示例19: net_fitted
def net_fitted(self, NeuralNet, X_train, y_train):
    """Fixture helper: fit a small conv net on 28x28 single-channel
    images for 3 epochs and return the fitted net.
    """
    nn = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('conv1', Conv2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('output', DenseLayer),
        ],
        input_shape=(None, 1, 28, 28),  # grayscale 28x28 images
        output_num_units=10,
        output_nonlinearity=softmax,
        # per-layer parameters bundled via more_params
        more_params=dict(
            conv1_filter_size=(5, 5), conv1_num_filters=16,
            conv2_filter_size=(3, 3), conv2_num_filters=16,
            pool2_pool_size=(8, 8),
            # NOTE(review): there is no 'hidden1' layer in the stack
            # above — this entry looks like a leftover; confirm it is
            # ignored by nolearn.
            hidden1_num_units=16,
        ),
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=3,
    )
    return nn.fit(X_train, y_train)
开发者ID:aaxwaz,项目名称:nolearn,代码行数:28,代码来源:test_lasagne.py
示例20: train
def train(self, X, y_train, X_test, ids_test, y_test, outfile, is_valid):
    """Label-encode the targets, fit a large two-hidden-layer adagrad
    net, and write predictions for X_test via ``self.make_submission``.

    y_test, outfile and is_valid are accepted for interface
    compatibility but not used in this method.
    """
    X = np.array(X)
    encoder = LabelEncoder()
    # integer class ids, int32 as expected by the net
    y = encoder.fit_transform(y_train).astype(np.int32)
    num_classes = len(encoder.classes_)
    num_features = X.shape[1]
    topology = [
        ('input', InputLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    net0 = NeuralNet(
        layers=topology,
        input_shape=(None, num_features),
        dense1_num_units=3500,
        dropout1_p=0.4,
        dense2_num_units=2300,
        dropout2_p=0.5,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        #update=nesterov_momentum,
        update=adagrad,
        update_learning_rate=0.01,
        #update_momentum=0.9,
        #objective_loss_function=softmax,
        objective_loss_function=categorical_crossentropy,
        eval_size=0.2,
        verbose=1,
        max_epochs=20,
    )
    net0.fit(X, y)
    X_test = np.array(X_test)
    self.make_submission(net0, X_test, ids_test, encoder)
开发者ID:hustmonk,项目名称:k21,代码行数:34,代码来源:net6.py
注:本文中的nolearn.lasagne.NeuralNet类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论