This page collects typical usage examples of the Python class sklearn.neural_network.BernoulliRBM. If you are wondering what BernoulliRBM is for and how it is used in practice, the curated class examples below may help.
Twenty BernoulliRBM code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
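Most of the examples below follow the same pattern: scale the inputs to the [0, 1] range, fit a BernoulliRBM as an unsupervised feature extractor, and feed its hidden-unit activations into a supervised classifier. A minimal sketch of that pattern on synthetic data (all parameter values here are illustrative assumptions, not tuned settings):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler

# Synthetic data for illustration only.
X = np.random.RandomState(0).rand(200, 64)
y = np.random.RandomState(1).randint(0, 2, size=200)

# BernoulliRBM expects binary or [0, 1]-scaled inputs.
X = MinMaxScaler().fit_transform(X)

# RBM as feature extractor, logistic regression as the classifier.
model = Pipeline(steps=[
    ('rbm', BernoulliRBM(n_components=64, learning_rate=0.06,
                         n_iter=10, random_state=0)),
    ('logistic', LogisticRegression()),
])
model.fit(X, y)
print(model.predict(X[:5]))

The [0, 1] input assumption is why nearly every example on this page pairs the RBM with MinMaxScaler or equivalent scaling.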
Example 1: RBM_SVM
def RBM_SVM(trainfeatures, testfeatures, trainlabels, testlabels):
    # ******************* scikit-learn RBM + SVM *******************
    print("train RBM+SVM model")
    ## trainfeatures = (trainfeatures - np.min(trainfeatures, 0)) / (np.max(trainfeatures, 0) + 0.0001)  # 0-1 scaling
    min_max_scaler = preprocessing.MinMaxScaler()
    trainfeatures_fs = min_max_scaler.fit_transform(trainfeatures)
    testfeatures_fs = min_max_scaler.transform(testfeatures)
    # SVM parameters
    clf = svm.SVC(C=5.0, kernel='sigmoid', degree=3, gamma=0.5, coef0=10.0,
                  shrinking=True, probability=False, tol=0.001, cache_size=200,
                  class_weight=None, verbose=False, max_iter=-1, random_state=None)
    # RBM parameters
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    # Machine learning pipeline
    classifier = Pipeline(steps=[('rbm', rbm), ('svm', clf)])
    # More components tend to give better prediction performance, but longer
    # fitting time
    rbm.n_components = 400
    classifier.fit(trainfeatures_fs, trainlabels)
    results = classifier.predict(testfeatures_fs)
    results = results.ravel()
    testerror = float(len(testlabels)
                      - np.sum(testlabels == results)) / float(len(testlabels))
    # print("error rate with SVM is %.4f" % testerror)
    return testerror
Author: nigellegg | Project: plankton | Lines: 34 | Source: SVM_Results.py
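A hedged invocation sketch for RBM_SVM, with random NumPy arrays standing in for real feature matrices (shapes and values are illustrative assumptions; combined with the imports below, the function above runs as-is):

import numpy as np
from sklearn import preprocessing, svm
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline

rng = np.random.RandomState(0)
trainfeatures = rng.rand(300, 64)            # stand-in training features
testfeatures = rng.rand(100, 64)             # stand-in test features
trainlabels = rng.randint(0, 2, size=300)    # stand-in binary labels
testlabels = rng.randint(0, 2, size=100)

print("test error rate: %.4f" % RBM_SVM(trainfeatures, testfeatures, trainlabels, testlabels))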
Example 2: SGD
def SGD():
    SGD = linear_model.SGDClassifier(loss='hinge', penalty='l2', random_state=42,
                                     n_jobs=-1, epsilon=0.001)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('SGD', SGD)])
    # RBM parameters obtained after cross-validation
    rbm.learning_rate = 0.01
    rbm.n_iter = 15
    rbm.n_components = 50
    SGD.alpha = 0.0001
    SGD.C = 1  # note: SGDClassifier has no C parameter; this attribute is ignored by fit
    # Training SGD
    SGD_classifier = linear_model.SGDClassifier(loss='hinge', penalty='l2', random_state=42,
                                                n_jobs=-1, alpha=0.0001, epsilon=0.001)
    SGD_classifier.fit(data_train, target_train)
    # Training the RBM-SGD pipeline
    classifier.fit(data_train, target_train)
    print("printing_results")
    print("SGD using RBM features:\n%s\n" % (metrics.classification_report(target_test, classifier.predict(data_test))))
    cm = confusion_matrix(target_test, classifier.predict(data_test))
    plt.matshow(cm)
    plt.title('Confusion Matrix SVM with SGD with RBM Features')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix1.jpg')
    print("SGD using raw pixel features:\n%s\n" % (metrics.classification_report(target_test, SGD_classifier.predict(data_test))))
    cm1 = confusion_matrix(target_test, SGD_classifier.predict(data_test))
    plt.matshow(cm1)
    plt.title('Confusion Matrix SVM with SGD Raw Features')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix2.jpg')
Author: campbelljc | Project: 598p4 | Lines: 33 | Source: imputation.py
Example 3: run_test
def run_test(params, model):
    if model == "rf":
        n_tree, mtry = params
        print("# Trees: ", n_tree)
        print("mtry: ", mtry)
        rf = RandomForestClassifier(n_estimators=int(n_tree), verbose=True,
                                    n_jobs=-1, max_features=int(mtry))
        rf.fit(X, y)
        modelPred = rf.predict(X)
    elif model == "svm":
        C, kernel = params
        print("# Cost: ", C)
        print("kernel: ", kernel)
        svmod = SVC(C=int(C), kernel=kernel)
        svmod.fit(X, y)
        modelPred = svmod.predict(X)
    elif model == "knn":
        k = params
        print("# k: ", k)
        knnmod = KNeighborsClassifier(n_neighbors=int(k))
        knnmod.fit(X, y)
        modelPred = knnmod.predict(X)
    elif model == "NeuralNetwork":
        n_components, learning_rate, batch_size, n_iter = params
        print("# n_components: ", n_components)
        print("# learning_rate: ", learning_rate)
        print("# batch_size: ", batch_size)
        print("# n_iter: ", n_iter)
        nnmod = BernoulliRBM(n_components=int(n_components), learning_rate=learning_rate,
                             batch_size=int(batch_size), n_iter=int(n_iter))
        nnmod.fit(X, y)  # BernoulliRBM is unsupervised; y is ignored
        # score_samples returns per-sample pseudo-likelihoods, not class predictions
        modelPred = nnmod.score_samples(X)
    accuError = AccuracyErrorCalc(y, modelPred)
    return accuError
Author: binga | Project: CA-Exacerbator | Lines: 35 | Source: HyperOptDemo.py
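run_test depends on module-level X, y and an AccuracyErrorCalc helper that the excerpt does not show. A hedged call sketch under those assumptions (the helper defined here is a hypothetical stand-in for the project's own implementation):

import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.RandomState(0).rand(200, 10)             # stand-in feature matrix
y = np.random.RandomState(1).randint(0, 2, size=200)   # stand-in labels

def AccuracyErrorCalc(y_true, y_pred):
    # hypothetical stand-in: fraction of mismatched predictions
    return float(np.mean(np.asarray(y_true) != np.asarray(y_pred)))

print(run_test((100, 3), "rf"))  # 100 trees, mtry = 3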
Example 4: process_machine_learning
def process_machine_learning(symbol, i, path):
    params['path'] = path
    label, feature = load_data(params['path'])
    # scale feature values so that they range from 0 to 1
    minmaxScaler = MinMaxScaler()
    feature = minmaxScaler.fit_transform(feature)
    print("Dimensions")
    print("label", label.shape)
    print("feature", feature.shape)
    # dimensionality reduction / feature extraction with an RBM
    start_time = time.time()
    rbm = BernoulliRBM(n_components=params['reduced_feature'], learning_rate=params['learning_rate'],
                       batch_size=params['batchsize'], n_iter=params['n_iter'])
    feature = rbm.fit_transform(feature)
    print("RBM--- %s seconds ---" % (time.time() - start_time))
    print("Dimensions after RBM")
    print("label", label.shape)
    print("feature", feature.shape)
    x_train, x_test, y_train, y_test = train_test_split(feature, label, i)
    y_pred = random_forest(x_train, x_test, y_train)
    signal_pd = pd.DataFrame({'y_test': y_test[:, 0], 'y_pred': y_pred})
    signal_pd.to_csv(os.path.join('..', 'data', 'rbm_random_forest', symbol, symbol + '_' + str(i) + '.csv'))
Author: cylt0212 | Project: MachineLearningProject | Lines: 30 | Source: rbm_random_forest.py
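process_machine_learning reads its hyper-parameters from a module-level params dict defined elsewhere in the project. A plausible shape for that dict, inferred from the keys used above (all values are illustrative assumptions):

params = {
    'path': None,            # set per call by process_machine_learning
    'reduced_feature': 100,  # assumed: number of RBM hidden units
    'learning_rate': 0.06,   # assumed
    'batchsize': 10,         # assumed
    'n_iter': 20,            # assumed
}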
Example 5: rbm_001
def rbm_001():
    s = 15
    crop = 150
    n_patches = 400000
    rf_size = 5
    train_x_crop_scale = CropScaleImageTransformer(training=True,
                                                   result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),
                                                   crop_size=crop,
                                                   scaled_size=s,
                                                   n_jobs=-1,
                                                   memmap=True)
    patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,
                                                         patch_size=rf_size,
                                                         n_jobs=-1)
    images = train_x_crop_scale.transform()
    images = images.reshape((images.shape[0], 15 * 15 * 3))
    # rbm needs inputs to be between 0 and 1
    scaler = MinMaxScaler()
    images = scaler.fit_transform(images)
    # Training takes a long time, says 80 seconds per iteration, but seems like longer
    # And this is only with 256 components
    rbm = BernoulliRBM(verbose=1)
    rbm.fit(images)
    train_x = rbm.transform(images)
    train_y = classes.train_solutions.data
    # 0.138 CV on 50% of the dataset
    wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 500}, n_jobs=-1)
    wrapper.cross_validation(train_x, train_y, sample=0.5, parallel_estimator=True)
Author: cyberport-kaggle | Project: galaxy-zoo | Lines: 34 | Source: rbm_001.py
Example 6: Logistic
def Logistic():
    logistic = linear_model.LogisticRegression()
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
    # RBM parameters obtained after cross-validation
    rbm.learning_rate = 0.01
    rbm.n_iter = 121
    rbm.n_components = 700
    logistic.C = 1.0
    # Training the RBM-Logistic pipeline
    classifier.fit(data_train, target_train)
    # Training plain logistic regression
    logistic_classifier = linear_model.LogisticRegression(C=1.0)
    logistic_classifier.fit(data_train, target_train)
    print("printing_results")
    print("Logistic regression using RBM features:\n%s\n" % (metrics.classification_report(target_test, classifier.predict(data_test))))
    cm3 = confusion_matrix(target_test, classifier.predict(data_test))
    plt.matshow(cm3)
    plt.title('Confusion Matrix Logistic Regression with RBM Features')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix3.jpg')
    print("Logistic regression using raw pixel features:\n%s\n" % (metrics.classification_report(target_test, logistic_classifier.predict(data_test))))
    cm4 = confusion_matrix(target_test, logistic_classifier.predict(data_test))
    plt.matshow(cm4)
    plt.title('Confusion Matrix Logistic Regression')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix4.jpg')

# Logistic()
Author: campbelljc | Project: 598p4 | Lines: 32 | Source: imputation.py
Example 7: build_classifier
def build_classifier(clf_name):
    clf = None
    parameters = {}
    if clf_name == "svm":
        clf = svm.SVC(kernel='linear', C=10)
        parameters = {}
    elif clf_name == "knn":
        clf = neighbors.KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='brute',
                                             leaf_size=30, metric='cosine', metric_params=None)
    elif clf_name == "rmb":  # "rmb" is presumably a typo for "rbm", kept as in the original source
        logistic = linear_model.LogisticRegression()
        rbm = BernoulliRBM(random_state=0, verbose=True)
        rbm.learning_rate = 0.01
        rbm.n_iter = 20
        rbm.n_components = 100
        logistic.C = 6000
        clf = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
        # parameters = {'clf__C': (1, 10)}
    elif clf_name == "tsne":
        clf = TSNE(n_components=2, init='random', metric='cosine')
    return clf, parameters
Author: verasazonova | Project: textsim | Lines: 27 | Source: test2.py
Example 8: neural_network_classify
def neural_network_classify(train_data, train_label, test_data):
    # nnc = MLPClassifier(algorithm='l-bfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
    nnc = BernoulliRBM(random_state=0, verbose=True)
    nnc.fit(train_data, ravel(train_label))  # BernoulliRBM is unsupervised; the labels are ignored
    # note: BernoulliRBM has no predict method, so this line raises AttributeError;
    # the original code also misspelled nnc as ncc
    test_label = nnc.predict(test_data)
    save_result(test_label, 'sklearn_neural_network_classify_Result.csv')
    return test_label
Author: fzhurd | Project: fzwork | Lines: 8 | Source: digit_recognizer_main_v4h.py
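As the comments above note, this example cannot run as written, because BernoulliRBM is an unsupervised feature extractor with no predict method. A hedged sketch of what was presumably intended, chaining the RBM into a supervised classifier (the LogisticRegression stage and its defaults are assumptions, not part of the original):

from numpy import ravel
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline

def neural_network_classify_fixed(train_data, train_label, test_data):
    # The RBM learns features; logistic regression does the actual classification.
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('logistic', LogisticRegression())])
    classifier.fit(train_data, ravel(train_label))
    return classifier.predict(test_data)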
Example 9: Bernoulli
def Bernoulli(X_train, X_test, y_train, y_test):
    mod = BernoulliRBM(random_state=0, verbose=True)
    mod.fit(X_train, y_train)  # BernoulliRBM is unsupervised; y_train is ignored
    print("Done training")
    # note: BernoulliRBM defines neither predict nor a supervised score method,
    # so the two calls below raise AttributeError as written
    bernoulli_labels = mod.predict(X_test)
    print("Done testing")
    bernoulli_score = mod.score(X_test, y_test)
    return bernoulli_score, bernoulli_labels
Author: maniarathi | Project: takethislifedata | Lines: 8 | Source: linclassifer.py
Example 10: test_rbm_verbose
def test_rbm_verbose():
    rbm = BernoulliRBM(n_iter=2, verbose=10)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        rbm.fit(Xdigits)
    finally:
        sys.stdout = old_stdout
Author: aniryou | Project: scikit-learn | Lines: 8 | Source: test_rbm.py
Example 11: runRBM
def runRBM(arr, clsfr):  # arr packs: iters, lrn_rate, logistic_c_val, logistic_c_val2, n_comp
    global file_dir, nEvents, solutionFile
    iters = int(arr[0] * 10)
    lrn_rate = arr[1]
    logistic_c_val = arr[2] * 1000.0
    logistic_c_val2 = arr[3] * 100.0
    n_comp = int(arr[4] * 100)
    # note: the original used undefined names (log_c_val, learn_rate); fixed to the variables above
    filename = ('rbm_iter' + str(iters) + '_logc' + str(logistic_c_val) + '_logcc' +
                str(logistic_c_val2) + '_lrn' + str(lrn_rate) + '_nc' + str(n_comp))
    logistic = linear_model.LogisticRegression()
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
    ###########################################################################
    # Training
    # Hyper-parameters. These were set by cross-validation using a GridSearchCV.
    # Here we are not performing cross-validation to save time.
    rbm.learning_rate = lrn_rate  # 0.10 / 0.06
    rbm.n_iter = iters  # 20
    # More components tend to give better prediction performance, but longer fitting time
    rbm.n_components = n_comp  # 250
    logistic.C = logistic_c_val  # 6000.0
    # Training the RBM-Logistic pipeline
    classifier.fit(sigtr[train_input].values, sigtr['Label'].values)
    # Training plain logistic regression
    logistic_classifier = linear_model.LogisticRegression(C=logistic_c_val2)  # 100.0
    logistic_classifier.fit(sigtr[train_input].values, sigtr['Label'].values)
    ###########################################################################
    # Evaluation
    if clsfr == 0:
        clsnn_pred = classifier.predict(sigtest[train_input].values)
        solnFile('clsnn_' + filename, clsnn_pred, sigtest['EventId'].values)
        # note: no 'clsnn_' prefix in this path, unlike the lognn_ branch below
        ams_score = ams.AMS_metric(solutionFile, file_dir + filename + '.out', nEvents)
        print(ams_score)
        logfile.write(filename + ': ' + str(ams_score) + '\n')
    elif clsfr == 1:
        log_cls_pred = logistic_classifier.predict(sigtest[train_input].values)
        solnFile('lognn_' + filename, log_cls_pred, sigtest['EventId'].values)
        ams_score = ams.AMS_metric(solutionFile, file_dir + 'lognn_' + filename + '.out', nEvents)
        print(ams_score)
        logfile.write('lognn ' + filename + ': ' + str(ams_score) + '\n')
    else:
        logistic_classifier_tx = linear_model.LogisticRegression(C=logistic_c_val2)
        # fit_transform on LogisticRegression relies on an older scikit-learn API
        logistic_classifier_tx.fit_transform(sigtr[train_input].values, sigtr['Label'].values)
        log_cls_tx_pred = logistic_classifier_tx.predict(sigtest[train_input].values)
        solnFile('lognntx_' + filename, log_cls_tx_pred, sigtest['EventId'].values)
        ams_score = ams.AMS_metric(solutionFile, file_dir + filename + '.out', nEvents)
        print(ams_score)
        logfile.write('lognntx ' + filename + ': ' + str(ams_score) + '\n')
    return -1.0 * float(ams_score)
Author: tibristo | Project: htautau | Lines: 58 | Source: runAnalysis.py
Example 12: test_gibbs_smoke
def test_gibbs_smoke():
    """Just check that we don't get NaNs sampling the full digits dataset."""
    rng = np.random.RandomState(42)
    X = Xdigits
    rbm1 = BernoulliRBM(n_components=42, batch_size=10,
                        n_iter=20, random_state=rng)
    rbm1.fit(X)
    X_sampled = rbm1.gibbs(X)
    assert_all_finite(X_sampled)
Author: Ashatz | Project: scikit-learn | Lines: 9 | Source: test_rbm.py
Example 13: test_transform
def test_transform():
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
    rbm1.fit(X)
    Xt1 = rbm1.transform(X)
    Xt2 = rbm1._mean_hiddens(X)
    assert_array_equal(Xt1, Xt2)
Author: amitmse | Project: scikit-learn | Lines: 9 | Source: test_rbm.py
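The test above pins down what transform returns: the conditional probabilities of the hidden units, P(h=1|v), exactly as computed by the private _mean_hiddens helper. A self-contained sketch of the same check on synthetic binary data (shapes and seeds are illustrative):

import numpy as np
from numpy.testing import assert_array_equal
from sklearn.neural_network import BernoulliRBM

X = (np.random.RandomState(0).rand(50, 20) > 0.5).astype(np.float64)
rbm = BernoulliRBM(n_components=8, n_iter=3, random_state=0).fit(X)
assert_array_equal(rbm.transform(X), rbm._mean_hiddens(X))  # identical by construction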
Example 14: rbm_dbn_train_and_predict
def rbm_dbn_train_and_predict(train_set_x, train_set_y, test_set_x, test_set_y):
    dbn = DBN(epochs=200, learn_rates=0.01)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    classifier = Pipeline(steps=[('rbm', rbm), ('dbn', dbn)])
    classifier.fit(train_set_x, train_set_y)
    PRED = classifier.predict(test_set_x)
    return PRED
Author: giesekow | Project: giles-aims-thesis | Lines: 10 | Source: scikitclassifiers.py
Example 15: test_gibbs_smoke
def test_gibbs_smoke():
    """Check that we don't get NaNs sampling the full digits dataset.
    Also check that sampling again will yield different results."""
    X = Xdigits
    rbm1 = BernoulliRBM(n_components=42, batch_size=40, n_iter=20, random_state=42)
    rbm1.fit(X)
    X_sampled = rbm1.gibbs(X)
    assert_all_finite(X_sampled)
    X_sampled2 = rbm1.gibbs(X)
    assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
Author: amitmse | Project: scikit-learn | Lines: 10 | Source: test_rbm.py
Example 16: rbm_logistic_train_and_predict
def rbm_logistic_train_and_predict(train_set_x, train_set_y, test_set_x, test_set_y):
    logistic = linear_model.LogisticRegression(C=6000)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
    classifier.fit(train_set_x, train_set_y)
    PRED = classifier.predict(test_set_x)
    return PRED
Author: giesekow | Project: giles-aims-thesis | Lines: 10 | Source: scikitclassifiers.py
Example 17: test_fit
def test_fit():
    X = Xdigits.copy()
    rbm = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=10, n_iter=7, random_state=9)
    rbm.fit(X)
    assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
    # in-place tricks shouldn't have modified X
    assert_array_equal(X, Xdigits)
Author: amitmse | Project: scikit-learn | Lines: 10 | Source: test_rbm.py
Example 18: rbm_knn_train_and_predict
def rbm_knn_train_and_predict(train_set_x, train_set_y, test_set_x, test_set_y):
    knn = KNeighborsClassifier(n_neighbors=5)
    rbm = BernoulliRBM(random_state=0, verbose=True)
    rbm.learning_rate = 0.06
    rbm.n_iter = 20
    rbm.n_components = 100
    classifier = Pipeline(steps=[('rbm', rbm), ('knn', knn)])
    classifier.fit(train_set_x, train_set_y)
    PRED = classifier.predict(test_set_x)
    return PRED
Author: giesekow | Project: giles-aims-thesis | Lines: 10 | Source: scikitclassifiers.py
Example 19: test_sample_hiddens
def test_sample_hiddens():
    rng = np.random.RandomState(0)
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
    rbm1.fit(X)
    h = rbm1._mean_hiddens(X[0])
    hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
    assert_almost_equal(h, hs, decimal=1)
Author: amitmse | Project: scikit-learn | Lines: 10 | Source: test_rbm.py
Example 20: __init__
class DeepRbmMnistClassifier:

    def __init__(self):
        self.n_components_first = 500
        self.n_components_second = 500
        self.n_components_third = 2000
        self.n_iter_first = 20
        self.n_iter_second = 20
        self.n_iter_third = 20
        self.learning_rate_first = 0.06
        self.learning_rate_second = 0.06
        self.learning_rate_third = 0.06
        self.verbose = True

    def label_to_feature(self, y):
        feature = [0] * 10
        feature[y] = 1
        return feature

    def fit(self, X, y):
        self.rbm_1 = BernoulliRBM(verbose=self.verbose,
                                  n_components=self.n_components_first,
                                  n_iter=self.n_iter_first,
                                  learning_rate=self.learning_rate_first)
        self.rbm_2 = BernoulliRBM(verbose=self.verbose,
                                  n_components=self.n_components_second,
                                  n_iter=self.n_iter_second,
                                  learning_rate=self.learning_rate_second)
        self.first_pipeline = Pipeline(steps=[('rbm_1', self.rbm_1), ('rbm_2', self.rbm_2)])
        self.first_pipeline.fit(X, y)
        # TODO improve. Look at how it is done in classify
        new_features = []
        for example, label in zip(X, y):
            transformed = self.first_pipeline.transform(example)[0]
            new_features.append(np.concatenate((transformed, self.label_to_feature(label))))
        self.rbm_3 = BernoulliRBM(verbose=self.verbose,
                                  n_components=self.n_components_third,
                                  n_iter=self.n_iter_third,
                                  learning_rate=self.learning_rate_third)
        self.rbm_3.fit(new_features, y)

    def classify(self, X):
        transformed = self.first_pipeline.transform(X)
        transformed = np.concatenate((transformed, [[0] * 10] * len(transformed)), axis=1)
        # Invert rbm_3 to go from the hidden layer back to the visible layer
        rbm_aux = BernoulliRBM()
        rbm_aux.intercept_hidden_ = self.rbm_3.intercept_visible_
        rbm_aux.intercept_visible_ = self.rbm_3.intercept_hidden_
        rbm_aux.components_ = np.transpose(self.rbm_3.components_)
        results = rbm_aux.transform(self.rbm_3.transform(transformed))
        results = results[:, -10:]
        return np.argmax(results, axis=1)
Author: costapt | Project: kaggle_digit_recognizer | Lines: 55 | Source: deep_rbm.py
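A minimal usage sketch for DeepRbmMnistClassifier on illustrative data. Note the class was written against an older scikit-learn: fit transforms one 1-D sample at a time, which newer versions reject unless each sample is reshaped to (1, -1).

import numpy as np

X = np.random.RandomState(0).rand(100, 784)             # stand-in for MNIST rows scaled to [0, 1]
y = np.random.RandomState(1).randint(0, 10, size=100)   # stand-in digit labels

clf = DeepRbmMnistClassifier()
clf.fit(X, y)                  # may need per-sample reshaping on current scikit-learn
print(clf.classify(X)[:10])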
Note: the sklearn.neural_network.BernoulliRBM class examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not repost without permission.