This article collects typical usage examples of the Python class nolearn.dbn.DBN. If you are wondering what the DBN class does, how to use it, or what working examples look like, the curated class examples here may help.
Twenty code examples of the DBN class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
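Across the examples below, the usage pattern is essentially the same: construct a DBN with a list of layer sizes (input width, one or more hidden layers, number of output units), call fit on the training data, then call predict on held-out data. The following minimal sketch illustrates that pattern; the digits dataset, split size, and hyperparameters are illustrative assumptions, not values taken from any single example below.

# Minimal sketch of the common nolearn.dbn.DBN workflow (illustrative values only).
from sklearn.cross_validation import train_test_split   # sklearn < 0.18; use sklearn.model_selection in newer versions
from sklearn.datasets import load_digits
from sklearn.metrics import classification_report
from nolearn.dbn import DBN

digits = load_digits()
X = digits.data / 16.0        # scale pixel values to [0, 1]
y = digits.target

trainX, testX, trainY, testY = train_test_split(X, y, test_size=0.33)

dbn = DBN(
    [trainX.shape[1], 300, 10],   # input units, one hidden layer of 300 units, 10 classes
    learn_rates=0.3,
    learn_rate_decays=0.9,
    epochs=10,
    verbose=1)
dbn.fit(trainX, trainY)

preds = dbn.predict(testX)
print(classification_report(testY, preds))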
Example 1: train_model
def train_model(data_set_path='/home/devin.fisher/Kingdoms/treadstone/_samples/still_data/still_training_data.pkl'):
    # data_set = None
    with open(data_set_path, 'rb') as f:
        data_set = pickle.load(f)

    # with open('/home/devin.fisher/Kingdoms/lol/still_training_data2.pkl', 'rb') as f:
    #     data_set = pickle.load(f)

    # (train_x, test_x, train_y, test_y) = train_test_split(data_set['data'], data_set['target'], test_size=0.1)
    train_x = data_set['data']
    test_x = data_set['data']
    train_y = data_set['target']
    test_y = data_set['target']

    dbn = DBN(
        [-1, 300, -1],
        learn_rates=0.3,
        learn_rate_decays=0.9,
        epochs=60,
        verbose=1)
    dbn.fit(train_x, train_y)

    joblib.dump(dbn, 'digit_model.pkl', compress=9)
    # dbn = joblib.load('digit_model.pkl')

    # compute the predictions for the test data and show a classification report
    preds = dbn.predict(test_x)
    print classification_report(test_y, preds)
Author: devin-fisher, Project: treadstone, Lines: 30, Source: video_still_model_builder.py
Example 2: benchmark
def benchmark(k, epochs):
    print("*" * 80)
    print("k: %d, epochs: %d\n" % (k, epochs))

    # select = SelectKBest(score_func=chi2, k=k)
    select = TruncatedSVD(n_components=k)
    X_train_trunc = select.fit_transform(X_train, Y_train)
    X_test_trunc = select.transform(X_test)
    print('done truncating')

    clf = DBN([X_train_trunc.shape[1], k, 4], learn_rates=0.3, learn_rate_decays=0.9, epochs=epochs, verbose=1)
    clf.fit(X_train_trunc, Y_train)
    pred = clf.predict(X_test_trunc)

    if CREATE_SUBMISSION:
        X_submit_trunc = select.transform(X_submit)
        pred_submit = clf.predict(X_submit_trunc)
        dump_csv(pred_submit, k, epochs)

    score = metrics.f1_score(Y_test, pred)
    print("f1-score: %0.3f" % score)
    print("classification report:")
    print(metrics.classification_report(Y_test, pred))
    print("confusion matrix:")
    print(metrics.confusion_matrix(Y_test, pred))
Author: alireza-saberi, Project: Applied_MachineLearning_COMP_598_MiniProject2, Lines: 28, Source: dbn_test.py
Example 3: training_dbn
def training_dbn(train_dataset, n_targets=2, learn_rates=0.3, learn_rate_decays=0.9, epochs=1000,
                 n_hidden_layers=5, n_hidden_layer_nodes=100, verbose=True):
    layers = np.ones(n_hidden_layers, dtype=int) * n_hidden_layer_nodes
    print(layers.tolist())

    X_train, y_train = train_dataset

    ff = [X_train.shape[1]]
    ff.extend(layers.tolist())
    ff.append(n_targets)

    # Create the dbn
    clf = DBN(
        ff,
        learn_rates=learn_rates,
        learn_rate_decays=learn_rate_decays,
        epochs=epochs,
        dropouts=0.1,
        verbose=verbose)

    # Counting time for training
    start = time.time()
    clf.fit(X_train, y_train)  # training
    end = time.time()
    exec_time = end - start
    print('Exec time was {} secs'.format(exec_time))

    return clf, exec_time
Author: caiobelfort, Project: DeepLearningProject, Lines: 25, Source: dbn.py
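As a usage sketch for training_dbn from Example 3: the random data, layer sizes, and epoch count below are illustrative assumptions, not values from the original project.

# Hypothetical call to training_dbn() on small random data (illustration only).
import numpy as np

X_train = np.random.rand(200, 30).astype(np.float32)   # 200 samples, 30 features in [0, 1]
y_train = np.random.randint(0, 2, size=200)            # binary labels for n_targets=2

clf, exec_time = training_dbn(
    (X_train, y_train),
    n_targets=2,
    epochs=10,                 # far fewer than the default 1000, just to exercise the code path
    n_hidden_layers=2,
    n_hidden_layer_nodes=50)
print('Trained in {:.2f} secs'.format(exec_time))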
Example 4: main
def main():
    data_id = 'B'
    data_path = '/broad/compbio/maxwshen/data/1-MAKETRAINTEST/complete/golf/'

    print 'train...', datetime.datetime.now()
    train_set = readin(data_id, 'train', data_path)
    print 'valid...', datetime.datetime.now()
    valid_set = readin(data_id, 'valid', data_path)
    print 'test...', datetime.datetime.now()
    test_set = readin(data_id, 'test', data_path)

    # Assumption: readin() returns a (features, labels) pair for each split
    dat_train, y_train = train_set
    dat_test, y_test = test_set

    # Input to 300 node RBM to 2 node output
    dbn = DBN(
        [dat_train.shape[1], 300, 2],
        learn_rates=5,
        learn_rate_decays=0.9,
        epochs=31,
        verbose=1)
    dbn.fit(dat_train, y_train)

    preds = dbn.predict(dat_test)
    print classification_report(y_test, preds)

    out_fn = 'dbn.pickle'
    with open(out_fn, 'w') as f:
        pickle.dump(dbn, f)
    return
Author: maxwshen, Project: Kellis, Lines: 28, Source: dbn_nolearn.py
Example 5: train
def train(X, Y, alphabet):
    model = DBN([13, 1000, len(alphabet)],
                learn_rates=0.3,
                learn_rate_decays=0.9,
                epochs=10,
                verbose=1)
    model.fit(X, Y)
    return model
Author: larisahax, Project: Dialect, Lines: 9, Source: nn.py
Example 6: run
def run():
    X_train, Y_train = load_training_data()
    X_train, Y_train = rotate_dataset(X_train, Y_train, 8)
    X_train, Y_train = nudge_dataset(X_train, Y_train)
    n_features = X_train.shape[1]
    n_classes = 10
    classifier = DBN([n_features, 8000, n_classes],
                     learn_rates=0.4, learn_rate_decays=0.9, epochs=75, verbose=1)
    classifier.fit(X_train, Y_train)
    test_data = get_test_data_set()
    predictions = classifier.predict(test_data)
    write_predictions_to_csv(predictions)
Author: bin2000, Project: kaggle-mnist-digits, Lines: 16, Source: predict.py
Example 7: __init__
def __init__(self):
    # images_train = data_train[:, 1:]
    # trainX, _trainX, trainY, _trainY = train_test_split(images_train / 255., values_train, test_size=0.5)

    # # load test.csv
    # test = pd.read_csv("data/test.csv")
    # data_test = test.as_matrix()
    # testX, _testX = train_test_split(data_test / 255., test_size=0.99)

    # Random Forest
    # self.clf = RandomForestClassifier()
    # Stochastic Gradient Descent
    # self.clf = SGDClassifier()
    # Support Vector Machine
    # self.clf = LinearSVC()
    # Nearest Neighbors
    # self.clf = KNeighborsClassifier(n_neighbors=13)

    train = pd.read_csv("data/train.csv")
    data_train = train.as_matrix()
    values_train = data_train[:, 0]
    images_train = data_train[:, 1:]
    trainX, _trainX, trainY, _trainY = train_test_split(images_train / 255., values_train, test_size=0.995)

    # Neural Network
    self.clf = DBN([trainX.shape[1], 300, 10], learn_rates=0.3, learn_rate_decays=0.9, epochs=10, verbose=1)

    # Training
    self.clf.fit(trainX, trainY)
    pass
Author: BellyWong, Project: redigit, Lines: 35, Source: clf.py
Example 8: train_clf
def train_clf(dim, X, y, classificator):
    print("Training for {} classes".format(dim[2]))

    if classificator == "DBN":
        clf = DBN(dim,
                  learn_rates=dbn_learn_rates,
                  learn_rate_decays=dbn_learn_rate_decays,
                  epochs=dbn_epochs,
                  minibatch_size=dbn_minibatch_size,
                  verbose=dbn_verbose,
                  dropouts=dbn_dropouts)
    elif classificator == "GaussianNB":
        clf = GaussianNB()

    clf.fit(X, y)
    return clf
Author: presight, Project: happy-cerberus, Lines: 17, Source: train.py
Example 9: train_dbn_dataset
def train_dbn_dataset(dataset, x_test, y_test, alpha, nhidden, epochs, batch_size, noises=[]):
    from nolearn.dbn import DBN

    num_classes = len(set(y_test))
    print "Number of classes", num_classes

    x_train, y_train = dataset
    dbn_model = DBN([x_train.shape[1], nhidden, num_classes],
                    learn_rates=alpha,
                    learn_rate_decays=0.9,
                    epochs=epochs,
                    verbose=1,
                    nesterov=False,
                    minibatch_size=batch_size,
                    noises=noises)
    dbn_model.fit(x_train, y_train)

    from sklearn.metrics import classification_report, accuracy_score
    y_true, y_pred = y_test, dbn_model.predict(x_test)  # Get our predictions
    print(classification_report(y_true, y_pred))  # Classification report for each class
    print(roc_auc_score(y_true, y_pred))          # Overall ROC AUC
    return y_pred, roc_auc_score(y_true, y_pred)
Author: viveksck, Project: nolearn, Lines: 20, Source: adult_dbn.py
Example 10: dbn_clf
def dbn_clf(X, y, hidden_sizes=[300], num_epochs=10):
    """ deep belief network """
    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.25, random_state=0)
    output_categories = np.load(os.path.join(loaddir, 'submit_col_name.npy'))

    print('Start training Neural Network...')
    dbn = DBN(
        [Xtrain.shape[1]] + hidden_sizes + [len(output_categories)],
        learn_rates=0.3,
        learn_rate_decays=0.9,
        epochs=num_epochs,
        verbose=1)
    dbn.fit(Xtrain, ytrain)

    ypred = dbn.predict_proba(Xtest)
    score = log_loss(ytest, ypred)
    print('Log loss = {}'.format(score))

    return dbn, score
Author: wkvictor, Project: Kaggle-TalkingData, Lines: 20, Source: train_models.py
Example 11: test
def test(self):
    # iris = datasets.load_iris()
    # X, y = iris.data, iris.target
    X, y = self.dataMat, self.labelMat
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.6, random_state=12)

    # clf = RandomForestClassifier(max_depth=6, min_samples_split=9, min_samples_leaf=15, n_estimators=5)
    # clf = DBN([X.shape[1], 24, 2], scales=0.5, learn_rates=0.02, learn_rate_decays=0.95, learn_rate_minimums=0.001,
    #           epochs=500, l2_costs=0.02 * 0.031, dropouts=0.2, verbose=0)
    # cvnum = ShuffleSplit(2013, n_iter=10, test_size=0.6, train_size=0.4, random_state=0)

    for scal in arange(4.5, 5.0, 0.5):
        print "**************************************************************"
        print "DBN scal=", scal
        clf = DBN([X.shape[1], 24, 48, 2], scales=0.5, learn_rates=0.01, learn_rate_decays=0.95,
                  learn_rate_minimums=0.001, epochs=50, l2_costs=0.02 * 0.001, dropouts=0.0, verbose=0)
        clf.fit(X_train, y_train)
        scores = cross_val_score(clf, X, y, cv=3, scoring='roc_auc')
        y_pred = clf.predict(X_test)
        y_predprob = clf.predict_proba(X_test)
        prf = precision_recall_fscore_support(y_test, y_pred, average='binary')
        print "Accuracy: %0.5f (+/- %0.5f)" % (scores.mean(), scores.std() * 2)
        print classification_report(y_test, y_pred)
        print 'The accuracy is: ', accuracy_score(y_test, y_pred)
        print 'The log loss is:', log_loss(y_test, y_predprob)
        print 'The ROC score is:', roc_auc_score(y_test, y_predprob[:, 1])
Author: kevinmtian, Project: Kaggle-Contests, Lines: 22, Source: cross_valid_NN.py
Example 12: main
def main():
    data_fn = "/home/ec2-user/Kellis/data/bravo.formatted/dat.all.txt"
    blacklist_fn = "/home/ec2-user/Kellis/data/bravo.formatted/dat.blacklist.txt"
    y_fn = "/home/ec2-user/Kellis/data/bravo.formatted/dat.y.txt"

    data = read_delimited_txt(data_fn, "\t")
    blacklist = read_delimited_txt(blacklist_fn, "\t")
    y = read_delimited_txt(y_fn, "\t")

    # Get names and remove the first element of each row, which is the row number
    names = data[0]
    data = data[1:]
    for i in range(len(data)):
        data[i] = data[i][1:]

    y = y[1:]
    for i in range(len(y)):
        y[i] = y[i][-1]
    y = convert_y_binary(y)

    # Normalizes column-wise so all values are between 0 and 1
    data = normalize_0_1(data)

    # Split into training, testing
    xtrain, xtest, ytrain, ytest = train_test_split(data, y, test_size=0.2, random_state=1)

    # Input to 300 node RBM to 2 node output
    dbn = DBN([xtrain.shape[1], 300, 2], learn_rates=5, learn_rate_decays=0.9, epochs=501, verbose=1)
    dbn.fit(xtrain, ytrain)

    preds = dbn.predict(xtest)
    print classification_report(ytest, preds)

    out_fn = "dbn.pickle"
    with open(out_fn, "w") as f:
        pickle.dump(dbn, f)
    return
Author: maxwshen, Project: Kellis, Lines: 38, Source: dbn.py
Example 13: runOfflineML
def runOfflineML(y, X, classifiers, savemodel=False):
    X_train, X_test, y_train, y_test = train_test_split(X, y.astype("int0"), test_size=0.20, random_state=0)
    data = dict(x_train=X_train, x_test=X_test, y_train=y_train, y_test=y_test)
    cls_stats = initClsStats(classifiers)

    for cls_name, cls in classifiers.items():
        cls_stats[cls_name]["n_train"] = data["x_train"].shape[0]
        cls_stats[cls_name]["n_test"] = data["x_test"].shape[0]
        cls_stats[cls_name]["n_features"] = data["x_train"].shape[1]

        tick = time.time()
        if cls_name == "DBN":
            data = dataNormalise(data)
            clf = DBN([data["x_train"].shape[1], 300, 2], learn_rates=0.3, learn_rate_decays=0.9, epochs=10, verbose=1)
            clf.fit(data["x_train"], data["y_train"])
        else:
            clf = classifiers[cls_name].fit(data["x_train"], data["y_train"])
        if savemodel:
            pickle.dump(clf, open(cls_name + ".dat", "w"))
            clf = pickle.load(open(cls_name + ".dat", "r"))
        cls_stats[cls_name]["training_time"] += time.time() - tick

        # check the accuracy on the test set
        tick = time.time()
        predicted = clf.predict(data["x_test"])
        cls_stats[cls_name]["testing_time"] += time.time() - tick

        acc = metrics.accuracy_score(data["y_test"], predicted)
        cls_stats[cls_name]["accuracy"] = acc
        print cls_name, "accuracy is: " + str(acc)

        # auc = metrics.roc_auc_score(data['y_test'], probs[:, 1])
        conf_matrix = metrics.confusion_matrix(data["y_test"], predicted)
        cls_stats[cls_name]["conf_matrix"] = conf_matrix
        # print conf_matrix

        precision, recall, fscore, support = metrics.precision_recall_fscore_support(data["y_test"], predicted)
        cls_stats[cls_name]["precision"] = precision
        cls_stats[cls_name]["recall"] = recall
        cls_stats[cls_name]["fscore"] = fscore
        cls_stats[cls_name]["support"] = support

    return cls_stats
Author: Nik0l, Project: UTemPro, Lines: 36, Source: OfflineLearning.py
Example 14: fit
def fit(self, X, y, X_pretrain=None):
    from nolearn.dbn import DBN

    if y.ndim == 2:
        n_outputs = y.shape[1]
    else:
        y = y[:, np.newaxis]
        n_outputs = 1

    params = dict(self.__dict__)
    from gdbn.activationFunctions import Linear
    params['output_act_funct'] = Linear()

    n_units = params.pop('n_units')
    n_hidden_layers = params.pop('n_hidden_layers')
    if isinstance(n_units, int):
        units = [n_units] * n_hidden_layers
    else:
        units = n_units
    units = [X.shape[1]] + units + [n_outputs]

    self.dbn = DBN(units, **params)
    print X.shape
    self.dbn.fit(X, y, X_pretrain=X_pretrain)
Author: mhdella, Project: kaggle-solar-energy, Lines: 23, Source: attic.py
Example 15: inputs
"""
Train the network with 3200 inputs (64x50 values in file)
6 output units (for the different defects)
Lets start with 2 hidden units
"""
# Numbers from example used here
(trainX, testX, trainY, testY) = train_test_split(
dataset / 255.0, dataset.target.astype("int0"), test_size=0.33)
dbn = DBN(
# [[numNodes input layer], numNodes hidden layer, numNodes output layer ]
[trainX.shape[1], 2, 6],
# Learning rate of algorithm
learn_rates=0.3,
# Decay of learn rate
learn_rate_decays=0.9,
# Iterations of training data (epochs)
epochs=10,
# Verbosity level
verbose=1)
dbn.fit(trainX,trainY)
print "trained yo!"
# Evaluate network
#-----------------
preds = dbn.predict(testX)
print classification_report(testY, preds) # Table of accuracies
Author: CAWilson94, Project: NeuralNetsAdventures, Lines: 29, Source: shallowBC.py
Example 16: range
####################################
if __name__ == '__main__':
    dbn_list = []
    for i in range(2, 8):
        dat, lab = db_load(tup(i), i)
        try:
            dbn = joblib.load("pickles/dbn_" + str(tup(i)) + "x" + str(i) + ".pkl")
            dbn_list.append(dbn)
        except:
            dbn = DBN(
                [i * tup(i), 400, 10],
                learn_rates=0.3,
                learn_rate_decays=0.9,
                epochs=50,
                verbose=1
            )
            dbn.fit(dat, lab)
            dbn_list.append(dbn)
            joblib.dump(dbn, "pickles/dbn_" + str(tup(i)) + "x" + str(i) + ".pkl")
        finally:
            # print dat.shape
            # print lab.shape
            print len(dbn_list)
            print ("trained ! ready to predict!")
            # print "training report for {}x{}:".format(tup(i), i)
            tes, labt = test_load(tup(i), i)
            preds = dbn.predict(tes)
            sampleClassificationReport = classification_report(labt, preds)
Author: Yami-Bitshark, Project: DBN_MULTI_RESOLUTION_DIGITS, Lines: 29, Source: vid-multi-dbn.py
Example 17: load_data
print "Loading data..."
X_train, y_train = load_data("../dataset/%s" % TRAIN_DATA)
# Split data to train and test
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=TEST_SIZE, random_state=0)
X_train = X_train.todense()
X_test = X_test.todense()
# Train --------------------------------------------------------------
print "Training..."
t1 = datetime.now()
dbn = DBN(
[-1, 300, 300, -1],
learn_rates=0.1,
learn_rate_decays=0.9,
epochs=10,
verbose=1)
dbn.fit(X_train, y_train)
print "Training %f secs" % (datetime.now() - t1).total_seconds()
if TEST_SIZE > 0:
tlabel = dbn.predict(X_test)
print 'Error: %f' % error_track_0(tlabel, y_test)
if DUMP:
# Dump model --------------------------------------------------------------
print "Dumping model..."
joblib.dump(dbn, '../model/deep/%s.pkl' % MODEL_NAME)
Author: bingo4508, Project: ML-handwriting-recognition, Lines: 32, Source: deep_learning.py
Example 18: DBNRegressor
class DBNRegressor(BaseEstimator, RegressorMixin):

    def __init__(self, n_hidden_layers=2, n_units=1000, epochs=100,
                 epochs_pretrain=0, scales=0.05,
                 real_valued_vis=True,
                 use_re_lu=False,
                 uniforms=False,
                 learn_rates_pretrain=0.1,
                 learn_rates=0.1,
                 learn_rate_decays=1.0,
                 learn_rate_minimums=0.0,
                 momentum=0.9,
                 momentum_pretrain=0.9,
                 l2_costs=0.0001,
                 l2_costs_pretrain=0.0001,
                 dropouts=None,
                 minibatch_size=64,
                 verbose=2,
                 fine_tune_callback=None,
                 nest_compare=True,
                 nest_compare_pretrain=None,
                 fan_outs=None,
                 nesterov=False,
                 ):
        self.n_hidden_layers = n_hidden_layers
        self.n_units = n_units
        self.epochs = epochs
        self.epochs_pretrain = epochs_pretrain
        self.learn_rates_pretrain = learn_rates_pretrain
        self.learn_rates = learn_rates
        self.learn_rate_decays = learn_rate_decays
        self.learn_rate_minimums = learn_rate_minimums
        self.l2_costs_pretrain = l2_costs_pretrain
        self.l2_costs = l2_costs
        self.momentum = momentum
        self.momentum_pretrain = momentum_pretrain
        self.verbose = verbose
        self.real_valued_vis = real_valued_vis
        self.use_re_lu = use_re_lu
        self.scales = scales
        self.minibatch_size = minibatch_size
        if dropouts is None:
            dropouts = [0.2] + [0.5] * n_hidden_layers
        self.dropouts = dropouts
        self.fine_tune_callback = fine_tune_callback
        self.nest_compare = nest_compare
        self.nest_compare_pretrain = nest_compare_pretrain
        self.fan_outs = fan_outs
        self.nesterov = nesterov

    def fit(self, X, y, X_pretrain=None):
        from nolearn.dbn import DBN

        if y.ndim == 2:
            n_outputs = y.shape[1]
        else:
            y = y[:, np.newaxis]
            n_outputs = 1

        params = dict(self.__dict__)
        from gdbn.activationFunctions import Linear
        params['output_act_funct'] = Linear()

        n_units = params.pop('n_units')
        n_hidden_layers = params.pop('n_hidden_layers')
        if isinstance(n_units, int):
            units = [n_units] * n_hidden_layers
        else:
            units = n_units
        units = [X.shape[1]] + units + [n_outputs]

        self.dbn = DBN(units, **params)
        print X.shape
        self.dbn.fit(X, y, X_pretrain=X_pretrain)

    def predict(self, X):
        return self.dbn.chunked_decision_function(X)
Author: mhdella, Project: kaggle-solar-energy, Lines: 76, Source: attic.py
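A hypothetical way to exercise the DBNRegressor wrapper from Example 18; the random regression data and reduced layer sizes below are illustrative assumptions, not part of the original file.

# Hypothetical usage of the DBNRegressor wrapper above (illustration only).
import numpy as np

X = np.random.rand(500, 20)    # 500 samples, 20 features
y = np.random.rand(500)        # real-valued targets

reg = DBNRegressor(n_hidden_layers=2, n_units=200, epochs=20, verbose=0)
reg.fit(X, y)
y_hat = reg.predict(X[:10])    # predictions via the underlying DBN's decision function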
Example 19: train_test_prep
x_train, x_test, y_train, y_test, x, y = train_test_prep()

# dbn_model = DBN([x_train.shape[1], 1500, 1500, 2],
#                 learn_rates=0.01,
#                 learn_rate_decays=0.9,
#                 epochs=1000,
#                 verbose=3)

dbn_model = DBN([x_train.shape[1], 5000, 2500, 1250, 500],
                # dropouts=0.01,
                output_act_funct=activationFunctions.Sigmoid(),
                learn_rates=0.01,
                learn_rates_pretrain=0.001,
                # minibatch_size=9,
                # learn_rate_decays=0.9,
                # learn_rate_minimums=0.0001,
                epochs_pretrain=500,
                epochs=500,
                # momentum=self.momentum,
                # real_valued_vis=True,
                # use_re_lu=True,
                verbose=2)
dbn_model.fit(x_train, y_train)

y_true, y_pred = y_test, dbn_model.predict(x_test)  # Get our predictions
print(classification_report(y_true, y_pred))        # Classification report per class
print y_true
print y_pred
Author: matrachma, Project: Deep-Belief-Network-for-Genomic-Prediciton-of-Categorical-Phenotype, Lines: 30, Source: nolearnDBN.py
Example 20: do_operation_
def do_operation_(X_train, X_test, y_train, y_test, l_r, d_r):
    clf = DBN([np.shape(X_train)[1], 300, 10], learn_rates=l_r, learn_rate_decays=d_r, epochs=30, verbose=1)
    clf.fit(X_train, y_train)
    y_test, y_pred = y_test, clf.predict(X_test)
    result = np.sum(y_test == y_pred)
    return (result, l_r, d_r)
Author: akm-sabbir, Project: Stacking_based_digit_classifier, Lines: 6, Source: digits_classifier.py
Note: The nolearn.dbn.DBN class examples in this article were compiled from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not republish without permission.