This article collects typical usage examples of the Python class shogun.Kernel.CombinedKernel. If you have been wondering what CombinedKernel is for, how to use it, or what working code looks like, the hand-picked class examples below should help.
The 20 CombinedKernel code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
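Before the individual examples, here is a minimal sketch of the pattern they all share: build a CombinedFeatures object with one sub-feature set per sub-kernel, append the sub-kernels to a CombinedKernel, then initialize the combined kernel on the combined features. This sketch is distilled from the examples below and assumes the legacy shogun modular Python bindings (shogun.Features, shogun.Kernel) are installed; the data is random and purely illustrative.
import numpy
from shogun.Features import RealFeatures, CombinedFeatures
from shogun.Kernel import GaussianKernel, CombinedKernel

data = numpy.random.randn(5, 20)           # 5-dimensional features, 20 vectors (illustrative)
feats = CombinedFeatures()
kernel = CombinedKernel()
for width in (0.5, 1.0, 2.0):              # one Gaussian sub-kernel per width
    feats.append_feature_obj(RealFeatures(data))
    kernel.append_kernel(GaussianKernel(10, width))
kernel.init(feats, feats)                  # sub-features and sub-kernels are matched by position
km = kernel.get_kernel_matrix()            # combined (weighted sum) kernel matrix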
Example 1: evaluation_cross_validation_multiclass_storage
def evaluation_cross_validation_multiclass_storage(traindat=traindat, label_traindat=label_traindat):
    from shogun.Evaluation import CrossValidation, CrossValidationResult
    from shogun.Evaluation import CrossValidationPrintOutput
    from shogun.Evaluation import CrossValidationMKLStorage, CrossValidationMulticlassStorage
    from shogun.Evaluation import MulticlassAccuracy, F1Measure
    from shogun.Evaluation import StratifiedCrossValidationSplitting
    from shogun.Features import MulticlassLabels
    from shogun.Features import RealFeatures, CombinedFeatures
    from shogun.Kernel import GaussianKernel, CombinedKernel
    from shogun.Classifier import MKLMulticlass
    from shogun.Mathematics import Statistics, MSG_DEBUG
    # training data: combined features, all built on the same underlying data
    features = RealFeatures(traindat)
    comb_features = CombinedFeatures()
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    labels = MulticlassLabels(label_traindat)
    # kernel: several Gaussian kernels with different widths, combined
    kernel = CombinedKernel()
    kernel.append_kernel(GaussianKernel(10, 0.1))
    kernel.append_kernel(GaussianKernel(10, 1))
    kernel.append_kernel(GaussianKernel(10, 2))
    # create the multiclass MKL machine
    svm = MKLMulticlass(1.0, kernel, labels)
    svm.set_kernel(kernel)
    # splitting strategy for 5-fold cross-validation (for classification it is
    # better to use stratified splitting, but the plain "CrossValidationSplitting"
    # is also available)
    splitting_strategy = StratifiedCrossValidationSplitting(labels, 5)
    # evaluation method
    evaluation_criterium = MulticlassAccuracy()
    # cross-validation instance
    cross_validation = CrossValidation(svm, comb_features, labels,
                                       splitting_strategy, evaluation_criterium)
    cross_validation.set_autolock(False)
    # append cross-validation output classes
    #cross_validation.add_cross_validation_output(CrossValidationPrintOutput())
    #mkl_storage=CrossValidationMKLStorage()
    #cross_validation.add_cross_validation_output(mkl_storage)
    multiclass_storage = CrossValidationMulticlassStorage()
    multiclass_storage.append_binary_evaluation(F1Measure())
    cross_validation.add_cross_validation_output(multiclass_storage)
    cross_validation.set_num_runs(3)
    # perform cross-validation
    result = cross_validation.evaluate()
    roc_0_0_0 = multiclass_storage.get_fold_ROC(0, 0, 0)
    #print roc_0_0_0
    auc_0_0_0 = multiclass_storage.get_fold_evaluation_result(0, 0, 0, 0)
    #print auc_0_0_0
    return roc_0_0_0, auc_0_0_0
Developer ID: lgatto, Project: shogun, Lines of code: 60, Source: evaluation_cross_validation_multiclass_storage.py
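A hypothetical way to run the example above, using random data in place of the module-level traindat / label_traindat arrays that the original file defines elsewhere (they are not shown on this page). This is only a sketch and assumes the legacy shogun modular bindings are installed.
import numpy
numpy.random.seed(17)
traindat = numpy.random.randn(2, 30)                             # 2 features x 30 vectors (illustrative)
label_traindat = numpy.array([float(i % 3) for i in range(30)])  # three classes: 0, 1, 2
roc, auc = evaluation_cross_validation_multiclass_storage(traindat, label_traindat)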
Example 2: evaluation_cross_validation_mkl_weight_storage
def evaluation_cross_validation_mkl_weight_storage(traindat=traindat, label_traindat=label_traindat):
    from shogun.Evaluation import CrossValidation, CrossValidationResult
    from shogun.Evaluation import CrossValidationPrintOutput
    from shogun.Evaluation import CrossValidationMKLStorage
    from shogun.Evaluation import ContingencyTableEvaluation, ACCURACY
    from shogun.Evaluation import StratifiedCrossValidationSplitting
    from shogun.Features import BinaryLabels
    from shogun.Features import RealFeatures, CombinedFeatures
    from shogun.Kernel import GaussianKernel, CombinedKernel
    from shogun.Classifier import LibSVM, MKLClassification
    from shogun.Mathematics import Statistics
    # training data: combined features, all built on the same underlying data
    features = RealFeatures(traindat)
    comb_features = CombinedFeatures()
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    labels = BinaryLabels(label_traindat)
    # kernel: several Gaussian kernels with different widths, combined
    kernel = CombinedKernel()
    kernel.append_kernel(GaussianKernel(10, 0.1))
    kernel.append_kernel(GaussianKernel(10, 1))
    kernel.append_kernel(GaussianKernel(10, 2))
    # create MKL on top of LibSVM; due to a memory bug, interleaved optimization
    # is not possible here
    svm = MKLClassification(LibSVM())
    svm.set_interleaved_optimization_enabled(False)
    svm.set_kernel(kernel)
    # splitting strategy for 5-fold cross-validation (for classification it is
    # better to use stratified splitting, but the plain "CrossValidationSplitting"
    # is also available)
    splitting_strategy = StratifiedCrossValidationSplitting(labels, 5)
    # evaluation method
    evaluation_criterium = ContingencyTableEvaluation(ACCURACY)
    # cross-validation instance
    cross_validation = CrossValidation(svm, comb_features, labels,
                                       splitting_strategy, evaluation_criterium)
    cross_validation.set_autolock(False)
    # append cross-validation output classes
    #cross_validation.add_cross_validation_output(CrossValidationPrintOutput())
    mkl_storage = CrossValidationMKLStorage()
    cross_validation.add_cross_validation_output(mkl_storage)
    cross_validation.set_num_runs(3)
    # perform cross-validation
    result = cross_validation.evaluate()
    # retrieve the MKL weights learned in each fold
    weights = mkl_storage.get_mkl_weights()
Developer ID: Argram, Project: shogun, Lines of code: 55, Source: evaluation_cross_validation_mkl_weight_storage.py
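A hedged follow-up that could be appended to the function body above, right after evaluate(): it prints the stored per-fold sub-kernel weights. It assumes get_mkl_weights() returns a 2-D array whose rows correspond to sub-kernels and whose columns correspond to folds/runs; the axis in the mean may need adjusting if the orientation differs.
    import numpy
    print "mkl weights per fold:"
    print weights
    # assumed orientation: rows = sub-kernels, columns = folds/runs
    print "mean weight per sub-kernel:", numpy.mean(weights, axis=1)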
Example 3: statistics_linear_time_mmd_kernel_choice
def statistics_linear_time_mmd_kernel_choice():
    from shogun.Features import RealFeatures, CombinedFeatures
    from shogun.Kernel import GaussianKernel, CombinedKernel
    from shogun.Statistics import LinearTimeMMD
    from shogun.Statistics import BOOTSTRAP, MMD1_GAUSSIAN
    # note that the linear time statistic is designed for much larger datasets
    n = 50000
    dim = 5
    difference = 2
    # data is standard normal distributed; only one dimension of Y has a mean
    # shift of "difference"
    (X, Y) = gen_data.create_mean_data(n, dim, difference)
    # concatenate, since the MMD class takes the data as one feature object
    # (it is possible to pass two, but then the data is copied)
    Z = concatenate((X, Y), axis=1)
    print "dimension means of X", [mean(x) for x in X]
    print "dimension means of Y", [mean(x) for x in Y]
    # create kernels/features to choose from;
    # here: a bunch of Gaussian kernels with different widths,
    # real sigmas are 2^-5, ..., 2^9
    sigmas = array([pow(2, x) for x in range(-5, 10)])
    # shogun uses a different parametrization of the Gaussian kernel
    shogun_sigmas = array([x*x*2 for x in sigmas])
    # we will use multiple kernels
    kernel = CombinedKernel()
    # multiple separate feature objects here; could also be one with appended data
    features = CombinedFeatures()
    # all kernels work on the same features
    for i in range(len(sigmas)):
        kernel.append_kernel(GaussianKernel(10, shogun_sigmas[i]))
        features.append_feature_obj(RealFeatures(Z))
    mmd = LinearTimeMMD(kernel, features, n)
    print "start learning kernel weights"
    mmd.set_opt_regularization_eps(10E-5)
    mmd.set_opt_low_cut(10E-5)
    mmd.set_opt_max_iterations(1000)
    mmd.set_opt_epsilon(10E-7)
    mmd.optimize_kernel_weights()
    weights = kernel.get_subkernel_weights()
    print "learned weights:", weights
    #pyplot.plot(array(range(len(sigmas))), weights)
    #pyplot.show()
    print "index of max weight", weights.argmax()
Developer ID: jimloco, Project: shogun, Lines of code: 53, Source: statistics_linear_time_mmd_kernel_choice.py
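A hypothetical closing step that could be appended at the end of the function body above, using only names already defined in it (sigmas and weights): it maps the index of the heaviest learned weight back to the corresponding Gaussian width.
    best_idx = weights.argmax()
    print "largest weight at index %d, i.e. sigma = %f" % (best_idx, sigmas[best_idx])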
Example 4: statistics_linear_time_mmd_kernel_choice
def statistics_linear_time_mmd_kernel_choice():
    from shogun.Features import RealFeatures, CombinedFeatures
    from shogun.Features import DataGenerator
    from shogun.Kernel import GaussianKernel, CombinedKernel
    from shogun.Statistics import LinearTimeMMD
    from shogun.Statistics import BOOTSTRAP, MMD1_GAUSSIAN
    # note that the linear time statistic is designed for much larger datasets
    n = 50000
    dim = 5
    difference = 2
    # use the data generator class to produce example data;
    # in practice, this data-generation call could be replaced by a method
    # that obtains data from a stream
    data = DataGenerator.generate_mean_data(n, dim, difference)
    print "dimension means of X", mean(data.T[0:n].T)
    print "dimension means of Y", mean(data.T[n:2*n+1].T)
    # create kernels/features to choose from;
    # here: a bunch of Gaussian kernels with different widths,
    # real sigmas are 2^-5, ..., 2^9
    sigmas = array([pow(2, x) for x in range(-5, 10)])
    # shogun uses a different parametrization of the Gaussian kernel
    shogun_sigmas = array([x*x*2 for x in sigmas])
    # we will use multiple kernels
    kernel = CombinedKernel()
    # multiple separate feature objects here; could also be one with appended data
    features = CombinedFeatures()
    # all kernels work on the same features
    for i in range(len(sigmas)):
        kernel.append_kernel(GaussianKernel(10, shogun_sigmas[i]))
        features.append_feature_obj(RealFeatures(data))
    mmd = LinearTimeMMD(kernel, features, n)
    print "start learning kernel weights"
    mmd.set_opt_regularization_eps(10E-5)
    mmd.set_opt_low_cut(10E-5)
    mmd.set_opt_max_iterations(1000)
    mmd.set_opt_epsilon(10E-7)
    mmd.optimize_kernel_weights()
    weights = kernel.get_subkernel_weights()
    print "learned weights:", weights
    #pyplot.plot(array(range(len(sigmas))), weights)
    #pyplot.show()
    print "index of max weight", weights.argmax()
Developer ID: coodoing, Project: shogun, Lines of code: 52, Source: statistics_linear_time_mmd_kernel_choice.py
Example 5: training_run
def training_run(options):
"""Conduct a training run and return a trained SVM kernel"""
settings = MotifFinderSettings(kirmes_ini.MOTIF_LENGTH, options.window_width, options.replace)
positives = MotifFinder(finder_settings=settings)
positives.setFastaFile(options.positives)
positives.setMotifs(options.pgff)
pmotifs, ppositions = positives.getResults()
negatives = MotifFinder(finder_settings=settings)
negatives.setFastaFile(options.negatives)
negatives.setMotifs(options.ngff)
nmotifs, npositions = negatives.getResults()
wds_kparams = kirmes_ini.WDS_KERNEL_PARAMETERS
wds_svm = EasySVM.EasySVM(wds_kparams)
num_positives = len(pmotifs.values()[0])
num_negatives = len(nmotifs.values()[0])
# Creating Kernel Objects
kernel = CombinedKernel()
features = CombinedFeatures()
kernel_array = []
motifs = pmotifs.keys()
motifs.sort()
# Adding Kmer Kernels
for motif in motifs:
all_examples = pmotifs[motif] + nmotifs[motif]
motif_features = wds_svm.createFeatures(all_examples)
wds_kernel = WeightedDegreePositionStringKernel(motif_features, motif_features, wds_kparams["degree"])
wds_kernel.set_shifts(wds_kparams["shift"] * ones(wds_kparams["seqlength"], dtype=int32))
features.append_feature_obj(motif_features)
kernel_array.append(wds_kernel)
kernel.append_kernel(wds_kernel)
rbf_svm = EasySVM.EasySVM(kirmes_ini.RBF_KERNEL_PARAMETERS)
positions = array(ppositions + npositions, dtype=float64).T
position_features = rbf_svm.createFeatures(positions)
features.append_feature_obj(position_features)
motif_labels = append(ones(num_positives), -ones(num_negatives))
complete_labels = Labels(motif_labels)
rbf_kernel = GaussianKernel(position_features, position_features, kirmes_ini.RBF_KERNEL_PARAMETERS["width"])
kernel_array.append(rbf_kernel)
kernel.append_kernel(rbf_kernel)
# Kernel init
kernel.init(features, features)
kernel.set_cache_size(kirmes_ini.K_CACHE_SIZE)
svm = LibSVM(kirmes_ini.K_COMBINED_C, kernel, complete_labels)
svm.parallel.set_num_threads(kirmes_ini.K_NUM_THREADS)
# Training
svm.train()
if not os.path.exists(options.output_path):
os.mkdir(options.output_path)
html = {}
if options.contrib:
html["contrib"] = contrib(svm, kernel, motif_labels, kernel_array, motifs)
if options.logos:
html["poims"] = poims(svm, kernel, kernel_array, motifs, options.output_path)
if options.query:
html["query"] = evaluate(options, svm, kernel, features, motifs)
htmlize(html, options.output_html)
Developer ID: veniciusgrjr, Project: oqtans_tools, Lines of code: 57, Source: kirmes.py
Example 6: get_weighted_spectrum_kernel
def get_weighted_spectrum_kernel(subfeats_list, options):
"""build weighted spectrum kernel with non-redundant k-mer list (removing reverse complement)
Arguments:
subfeats_list -- list of sub-feature objects
options -- object containing option data
Return:
CombinedFeatures of StringWord(Ulong)Features, CombinedKernel of CommWord(Ulong)StringKernel
"""
kmerlen = options.kmerlen
kmerlen2 = options.kmerlen2
subkernels = 0
kernel = CombinedKernel()
feats = CombinedFeatures()
for subfeats in subfeats_list:
feats.append_feature_obj(subfeats)
for k in xrange(kmerlen, kmerlen2+1):
if k <= 8:
subkernel = CommWordStringKernel(10, False)
else:
subkernel = CommUlongStringKernel(10, False)
kernel.append_kernel(subkernel)
subkernels+=1
kernel.init(feats, feats)
kernel.set_subkernel_weights(numpy.array([1/float(subkernels)]*subkernels, numpy.dtype('float64')))
return kernel
Developer ID: aleasoni, Project: Summer-Research-2013, Lines of code: 34, Source: kmersvm_train.py
Example 7: create_empty_promoter_kernel
def create_empty_promoter_kernel(param):
"""
creates an uninitialized promoter kernel
@param param:
"""
# centered WDK/WDK-shift
if param["shifts"] == 0:
kernel_center = WeightedDegreeStringKernel(param["degree"])
else:
kernel_center = WeightedDegreePositionStringKernel(10, param["degree"])
shifts_vector = numpy.ones(param["center_offset"]*2, dtype=numpy.int32)*param["shifts"]
kernel_center.set_shifts(shifts_vector)
kernel_center.set_cache_size(param["kernel_cache"]/3)
# border spetrum kernels
size = param["kernel_cache"]/3
use_sign = False
kernel_left = WeightedCommWordStringKernel(size, use_sign)
kernel_right = WeightedCommWordStringKernel(size, use_sign)
# assemble combined kernel
kernel = CombinedKernel()
kernel.append_kernel(kernel_center)
kernel.append_kernel(kernel_left)
kernel.append_kernel(kernel_right)
return kernel
Developer ID: cwidmer, Project: multitask, Lines of code: 32, Source: shogun_factory_new.py
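A hypothetical call of the factory above, with a parameter dictionary containing exactly the keys the function reads; the concrete values are illustrative and not taken from the original project.
param = {"shifts": 2, "degree": 4, "center_offset": 50, "kernel_cache": 300}
promoter_kernel = create_empty_promoter_kernel(param)   # uninitialized CombinedKernel with 3 sub-kernels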
Example 8: create_combined_kernel
def create_combined_kernel(kname, kparam, examples, train_mode, preproc):
"""A wrapper for creating combined kernels.
kname, kparam and examples are lists.
"""
num_kernels = len(kname)
feats['combined'] = CombinedFeatures()
kernel = CombinedKernel()
for kix in xrange(num_kernels):
cur_kname = '%s%d' % (kname[kix],kix)
(cur_feats, cur_preproc) = create_features(kname[kix], examples[kix], kparam[kix], train_mode, preproc)
feats[cur_kname] = cur_feats
cur_kernel = create_kernel(kname[kix], kparam[kix], cur_feats)
kernel.append_kernel(cur_kernel)
return (feats,kernel)
Developer ID: axitkhurana, Project: shogun, Lines of code: 18, Source: experiment.py
Example 9: train
def train(self, data, labels):
"""
model training
"""
# centered WDK/WDK-shift
if self.param["shifts"] == 0:
kernel_center = WeightedDegreeStringKernel(self.param["degree"])
else:
kernel_center = WeightedDegreePositionStringKernel(10, self.param["degree"])
shifts_vector = numpy.ones(self.param["center_offset"]*2, dtype=numpy.int32)*self.param["shifts"]
kernel_center.set_shifts(shifts_vector)
kernel_center.set_cache_size(self.param["kernel_cache"]/3)
# border spetrum kernels
size = self.param["kernel_cache"]/3
use_sign = False
kernel_left = WeightedCommWordStringKernel(size, use_sign)
kernel_right = WeightedCommWordStringKernel(size, use_sign)
# assemble combined kernel
kernel = CombinedKernel()
kernel.append_kernel(kernel_center)
kernel.append_kernel(kernel_left)
kernel.append_kernel(kernel_right)
## building features
feat = create_features(data, self.param["center_offset"], self.param["center_pos"])
# init combined kernel
kernel.init(feat, feat)
print "len(labels) = %i" % (len(labels))
lab = BinaryLabels(numpy.double(labels))
self.svm = SVMLight(self.param["cost"], kernel, lab)
# show debugging output
self.svm.io.enable_progress()
self.svm.io.set_loglevel(MSG_DEBUG)
# optimization settings
num_threads = 2
self.svm.parallel.set_num_threads(num_threads)
self.svm.set_epsilon(10e-8)
self.svm.train()
return self
Developer ID: kuod, Project: genomeutils, Lines of code: 49, Source: promoter_kernel.py
Example 10: loadSVM
def loadSVM(pickled_svm_filename, C, labels):
"""Loads a Shogun SVM object which was pickled by saveSVM"""
from cPickle import Unpickler, PickleError
from shogun.Kernel import CombinedKernel
pickle_file = open(pickled_svm_filename, 'rb')
unpck = Unpickler(pickle_file)
(version, num_sv, name, bias, alphas, svs) = unpck.load()
if (version == __version__):
svm = LibSVM(num_sv) # same as .create_new_model(num_sv)
svm.set_bias(bias)
svm.set_alphas(alphas)
svm.set_support_vectors(svs)
kernel = CombinedKernel() #not sure if this is even required
kernel.set_name(name) # maybe not required
svm.set_kernel(kernel)
else:
print "File was pickled by another version of EasySVM.py or is not a kernel:"
print "Received from ", pickled_svm_filename, ": ", version, " expected: ", __version__
raise PickleError
return svm
Developer ID: boya888, Project: oqtans_tools, Lines of code: 20, Source: EasySVM.py
Example 11: create_combined_wd_kernel
def create_combined_wd_kernel(instances, param):
"""
creates a combined wd kernel object
"""
num_features = len(instances[0])
# contruct combined features
kernel = CombinedKernel()
for idx in range(num_features):
param.kernel = "WeightedDegreeStringKernel"
tmp_kernel = create_empty_kernel(param)
kernel.append_kernel(tmp_kernel)
combined_features = create_combined_wd_features(instances, feat_type="dna")
kernel.init(combined_features, combined_features)
return kernel
Developer ID: cwidmer, Project: multitask, Lines of code: 22, Source: shogun_factory_new.py
Example 12: mkl_binclass_modular
def mkl_binclass_modular(fm_train_real=traindat,
                         fm_test_real=testdat,
                         fm_label_twoclass=label_traindat):
    sc1 = StringCharFeatures(strings1, RAWBYTE)
    sc2 = StringCharFeatures(strings2, RAWBYTE)
    sfeats1 = StringWordFeatures(RAWBYTE)
    sfeats1.obtain_from_char(sc1, 0, 2, 0, False)
    skernel1 = CommWordStringKernel(10, False)
    skernel1.init(sfeats1, sfeats1)
    sfeats2 = StringWordFeatures(RAWBYTE)
    sfeats2.obtain_from_char(sc2, 0, 2, 0, False)
    skernel2 = CommWordStringKernel(10, False)
    skernel2.init(sfeats2, sfeats2)
    ffeats = RealFeatures(traindat)
    fkernel = LinearKernel(ffeats, ffeats)
    fffeats = RealFeatures(traindat)
    ffkernel = GaussianKernel(fffeats, fffeats, 1.0)
    # COMBINING LINADD FEATURES/KERNELS LEADS TO FAIL
    feats_train = CombinedFeatures()
    feats_train.append_feature_obj(ffeats)
    #feats_train.append_feature_obj(fffeats)
    feats_train.append_feature_obj(sfeats2)
    print feats_train.get_num_vectors()
    kernel = CombinedKernel()
    kernel.append_kernel(fkernel)
    #kernel.append_kernel(ffkernel)
    kernel.append_kernel(skernel2)
    kernel.init(feats_train, feats_train)
    labels = RegressionLabels(fm_label_twoclass)
    mkl = MKLRegression()
    mkl.set_mkl_norm(1)  # 2, 3
    mkl.set_C(1, 1)
    mkl.set_kernel(kernel)
    mkl.set_labels(labels)
    mkl.io.enable_file_and_line()
    mkl.io.set_loglevel(MSG_DEBUG)
Developer ID: aeweiwi, Project: shogun, Lines of code: 46, Source: mkl_linadd_regression.py
Example 13: CombinedFeatures
feats_train.append_feature_obj(RealFeatures(traindata_real))
feats_train.append_feature_obj(RealFeatures(traindata_real))
feats_train.append_feature_obj(RealFeatures(traindata_real))
feats_train.append_feature_obj(RealFeatures(traindata_real))
feats_test = CombinedFeatures()
feats_test.append_feature_obj(RealFeatures(testdata_real))
feats_test.append_feature_obj(RealFeatures(testdata_real))
feats_test.append_feature_obj(RealFeatures(testdata_real))
feats_test.append_feature_obj(RealFeatures(testdata_real))
feats_test.append_feature_obj(RealFeatures(testdata_real))
labels = BinaryLabels(trainlab)
# and corresponding combined kernel
kernel = CombinedKernel()
kernel.append_kernel(GaussianKernel(10, s))
kernel.append_kernel(GaussianKernel(10, s))
kernel.append_kernel(GaussianKernel(10, s))
kernel.append_kernel(GaussianKernel(10, s))
kernel.append_kernel(GaussianKernel(10, s))
kernel.init(feats_train, feats_train)
# Create a classifier
classifier=MKLClassification(LibSVM())
classifier.set_interleaved_optimization_enabled(False)
classifier.set_kernel(kernel)
classifier.set_labels(labels)
classifier.set_C(C, C)
param_tree_root=ModelSelectionParameters()
Developer ID: nickponline, Project: mkl, Lines of code: 31, Source: mklms.py
Example 14: kernel_combined_custom_poly_modular
def kernel_combined_custom_poly_modular(fm_train_real=traindat, fm_test_real=testdat, fm_label_twoclass=label_traindat):
    from shogun.Features import CombinedFeatures, RealFeatures, BinaryLabels
    from shogun.Kernel import CombinedKernel, PolyKernel, CustomKernel
    from shogun.Classifier import LibSVM
    kernel = CombinedKernel()
    feats_train = CombinedFeatures()
    tfeats = RealFeatures(fm_train_real)
    tkernel = PolyKernel(10, 3)
    tkernel.init(tfeats, tfeats)
    K = tkernel.get_kernel_matrix()
    kernel.append_kernel(CustomKernel(K))
    subkfeats_train = RealFeatures(fm_train_real)
    feats_train.append_feature_obj(subkfeats_train)
    subkernel = PolyKernel(10, 2)
    kernel.append_kernel(subkernel)
    kernel.init(feats_train, feats_train)
    labels = BinaryLabels(fm_label_twoclass)
    svm = LibSVM(1.0, kernel, labels)
    svm.train()
    kernel = CombinedKernel()
    feats_pred = CombinedFeatures()
    pfeats = RealFeatures(fm_test_real)
    tkernel = PolyKernel(10, 3)
    tkernel.init(tfeats, pfeats)
    K = tkernel.get_kernel_matrix()
    kernel.append_kernel(CustomKernel(K))
    subkfeats_test = RealFeatures(fm_test_real)
    feats_pred.append_feature_obj(subkfeats_test)
    subkernel = PolyKernel(10, 2)
    kernel.append_kernel(subkernel)
    kernel.init(feats_train, feats_pred)
    svm.set_kernel(kernel)
    svm.apply()
    km_train = kernel.get_kernel_matrix()
    return km_train, kernel
Developer ID: behollis, Project: muViewBranch, Lines of code: 44, Source: kernel_combined_custom_poly_modular.py
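A hypothetical invocation of the example above, using random data in place of the module-level traindat/testdat arrays (not shown on this page); it assumes the legacy shogun modular bindings are installed.
import numpy
numpy.random.seed(0)
X_train = numpy.random.randn(2, 30)                            # 2 features x 30 train vectors
X_test = numpy.random.randn(2, 10)                             # 2 features x 10 test vectors
y_train = numpy.where(numpy.random.randn(30) > 0, 1.0, -1.0)   # +/-1 labels
km, combined = kernel_combined_custom_poly_modular(X_train, X_test, y_train)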
Example 15: mkl_multiclass_modular
def mkl_multiclass_modular(fm_train_real, fm_test_real, label_train_multiclass,
                           width, C, epsilon, num_threads, mkl_epsilon, mkl_norm):
    from shogun.Features import CombinedFeatures, RealFeatures, Labels
    from shogun.Kernel import CombinedKernel, GaussianKernel, LinearKernel, PolyKernel
    from shogun.Classifier import MKLMultiClass
    kernel = CombinedKernel()
    feats_train = CombinedFeatures()
    feats_test = CombinedFeatures()
    subkfeats_train = RealFeatures(fm_train_real)
    subkfeats_test = RealFeatures(fm_test_real)
    subkernel = GaussianKernel(10, width)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)
    subkfeats_train = RealFeatures(fm_train_real)
    subkfeats_test = RealFeatures(fm_test_real)
    subkernel = LinearKernel()
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)
    subkfeats_train = RealFeatures(fm_train_real)
    subkfeats_test = RealFeatures(fm_test_real)
    subkernel = PolyKernel(10, 2)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)
    kernel.init(feats_train, feats_train)
    labels = Labels(label_train_multiclass)
    mkl = MKLMultiClass(C, kernel, labels)
    mkl.set_epsilon(epsilon)
    mkl.parallel.set_num_threads(num_threads)
    mkl.set_mkl_epsilon(mkl_epsilon)
    mkl.set_mkl_norm(mkl_norm)
    mkl.train()
    kernel.init(feats_train, feats_test)
    out = mkl.classify().get_labels()
    return out
Developer ID: AsherBond, Project: shogun, Lines of code: 49, Source: mkl_multiclass_modular.py
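A hypothetical invocation of the function above with random data; all parameter values here are illustrative only, and the sketch assumes the same legacy shogun bindings as the example.
import numpy
numpy.random.seed(1)
X_train = numpy.random.randn(2, 30)
X_test = numpy.random.randn(2, 10)
y_train = numpy.array([float(i % 3) for i in range(30)])   # three classes: 0, 1, 2
predictions = mkl_multiclass_modular(X_train, X_test, y_train,
                                     width=1.2, C=1.0, epsilon=1e-5, num_threads=1,
                                     mkl_epsilon=0.001, mkl_norm=2)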
Example 16: CombinedFeatures
feats_train = CombinedFeatures()
feats_train.append_feature_obj(RealFeatures(data_1))
feats_train.append_feature_obj(RealFeatures(data_2))
feats_train.append_feature_obj(RealFeatures(data_3))
#feats_test = CombinedFeatures()
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
labels = BinaryLabels(trainlab)
# and corresponding combined kernel
kernel = CombinedKernel()
kernel.append_kernel(LinearKernel())
kernel.append_kernel(LinearKernel())
kernel.append_kernel(LinearKernel())
kernel.init(feats_train, feats_train)
kernel.print_modsel_params()
# Create a classifier
classifier=MKLClassification(LibSVM())
classifier.set_interleaved_optimization_enabled(False)
classifier.set_kernel(kernel)
classifier.set_labels(labels)
classifier.set_C(2, 1)
param_tree_root=ModelSelectionParameters()
Developer ID: nickponline, Project: mkl, Lines of code: 30, Source: irimklms-500.py
Example 17: CombinedFeatures
feats_train.append_feature_obj(RealFeatures(data_2))
feats_train.append_feature_obj(RealFeatures(data_3))
feats_train.append_feature_obj(RealFeatures(data_4))
feats_train.append_feature_obj(RealFeatures(data_5))
#feats_test = CombinedFeatures()
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
#feats_test.append_feature_obj(RealFeatures(testdata_real))
labels = Labels(trainlab)
# and corresponding combined kernel
kernel = CombinedKernel()
kernel.append_kernel(GaussianKernel(10, 2.0))
kernel.append_kernel(GaussianKernel(10, 0.25))
kernel.append_kernel(GaussianKernel(10, 0.062))
kernel.append_kernel(GaussianKernel(10, 8.0))
kernel.append_kernel(GaussianKernel(10, 10.0))
kernel.init(feats_train, feats_train)
# Create a classifier
classifier=MKLClassification(LibSVM())
classifier.set_interleaved_optimization_enabled(False)
classifier.set_kernel(kernel)
classifier.set_labels(labels)
classifier.set_C(1, 1)
param_tree_root=ModelSelectionParameters()
Developer ID: nickponline, Project: mkl, Lines of code: 31, Source: irimklms.py
Example 18: mkl_binclass_modular
def mkl_binclass_modular(train_data, test_data, train_labels, test_labels, d1, d2):
    # create some Gaussian train/test kernel matrices
    tfeats = RealFeatures(train_data)
    tkernel = GaussianKernel(128, d1)
    tkernel.init(tfeats, tfeats)
    K_train = tkernel.get_kernel_matrix()
    pfeats = RealFeatures(test_data)
    tkernel.init(tfeats, pfeats)
    K_test = tkernel.get_kernel_matrix()
    # create combined train features
    feats_train = CombinedFeatures()
    feats_train.append_feature_obj(RealFeatures(train_data))
    # and the corresponding combined kernel
    kernel = CombinedKernel()
    kernel.append_kernel(CustomKernel(K_train))
    kernel.append_kernel(GaussianKernel(128, d2))
    kernel.init(feats_train, feats_train)
    # train MKL
    labels = Labels(train_labels)
    mkl = MKLClassification()
    # do not use svmlight's interleaved optimization
    mkl.set_interleaved_optimization_enabled(0)
    # which norm to use for MKL
    mkl.set_mkl_norm(2)
    # set cost (neg, pos)
    mkl.set_C(1, 1)
    # set kernel and labels
    mkl.set_kernel(kernel)
    mkl.set_labels(labels)
    # train
    mkl.train()
    # test: create combined test features
    feats_pred = CombinedFeatures()
    feats_pred.append_feature_obj(RealFeatures(test_data))
    # and the corresponding combined kernel
    kernel = CombinedKernel()
    kernel.append_kernel(CustomKernel(K_test))
    kernel.append_kernel(GaussianKernel(128, d2))
    kernel.init(feats_train, feats_pred)
    # and classify
    mkl.set_kernel(kernel)
    output = mkl.apply().get_labels()
    output = [1.0 if i > 0 else -1.0 for i in output]
    accu = len(where(output == test_labels)[0]) / float(len(output))
    return accu
Developer ID: leiding326, Project: data-science, Lines of code: 58, Source: mkl_binclass_modular.py
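A hypothetical call of the function above with random binary-labelled data; d1 and d2 are the two Gaussian widths, all values are illustrative, and the sketch assumes the module-level imports of the original file (RealFeatures, GaussianKernel, CustomKernel, where, ...) are in place.
import numpy
numpy.random.seed(2)
X_train = numpy.random.randn(2, 40)
X_test = numpy.random.randn(2, 20)
y_train = numpy.where(numpy.random.randn(40) > 0, 1.0, -1.0)
y_test = numpy.where(numpy.random.randn(20) > 0, 1.0, -1.0)
accuracy = mkl_binclass_modular(X_train, X_test, y_train, y_test, d1=2.0, d2=0.5)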
Example 19: _train
def _train(self, train_data, param):
"""
training procedure using training examples and labels
@param train_data: Data relevant to SVM training
@type train_data: dict<str, list<instances> >
@param param: Parameters for the training procedure
@type param: ParameterSvm
"""
# merge data sets
data = PreparedMultitaskData(train_data, shuffle=True)
# create shogun label
lab = shogun_factory.create_labels(data.labels)
########################################################
print "creating a kernel for each node:"
########################################################
# assemble combined kernel
combined_kernel = CombinedKernel()
combined_kernel.io.set_loglevel(shogun.Kernel.MSG_INFO)
base_features = shogun_factory.create_features(data.examples)
combined_features = CombinedFeatures()
##################################################
# intra-domain blocks
# intra_block_vec = PairiiVec()
#
# for task_id in data.get_task_ids():
# intra_block_vec.push_back(Pairii(task_id, task_id))
#
#
#
# # create mask-based normalizer
# normalizer = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, intra_block_vec)
# kernel = shogun_factory.create_empty_kernel(param)
# kernel.set_normalizer(normalizer)
#
# # append current kernel to CombinedKernel
# combined_kernel.append_kernel(kernel)
#
# # append features
# combined_features.append_feature_obj(base_features)
#
# print "------"
#
# ##################################################
# # all blocks
#
#
# all_block_vec = PairiiVec()
#
# for task_id_1 in data.get_task_ids():
# for task_id_2 in data.get_task_ids():
# all_block_vec.push_back(Pairii(task_id_1, task_id_2))
#
#
# # create mask-based normalizer
# normalizer_all = MultitaskKernelMaskPairNormalizer(data.task_vector_nums, all_block_vec)
# kernel_all = shogun_factory.create_empty_kernel(param)
# kernel_all.set_normalizer(normalizer_all)
#
# # append current kernel to CombinedKernel
# combined_kernel.append_kernel(kernel_all)
#
# # append features
# combined_features.append_feature_obj(base_features)
##################################################
# add one kernel per similarity position
# init seq handler
pseudoseqs = SequencesHandler()
pseudoseq_length = pseudoseqs.seq_length
for pos in range(pseudoseq_length):
print "appending kernel for pos %i" % (pos)
print "nums", data.task_vector_nums
#.........这里部分代码省略.........
Developer ID: cwidmer, Project: multitask, Lines of code: 101, Source: method_mhc_mkl.py
Example 20: kernel_combined_modular
def kernel_combined_modular(fm_train_real=traindat, fm_test_real=testdat, fm_train_dna=traindna, fm_test_dna=testdna):
    from shogun.Kernel import CombinedKernel, GaussianKernel, FixedDegreeStringKernel, LocalAlignmentStringKernel
    from shogun.Features import RealFeatures, StringCharFeatures, CombinedFeatures, DNA
    kernel = CombinedKernel()
    feats_train = CombinedFeatures()
    feats_test = CombinedFeatures()
    subkfeats_train = RealFeatures(fm_train_real)
    subkfeats_test = RealFeatures(fm_test_real)
    subkernel = GaussianKernel(10, 1.1)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)
    subkfeats_train = StringCharFeatures(fm_train_dna, DNA)
    subkfeats_test = StringCharFeatures(fm_test_dna, DNA)
    degree = 3
    subkernel = FixedDegreeStringKernel(10, degree)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)
    subkfeats_train = StringCharFeatures(fm_train_dna, DNA)
    subkfeats_test = StringCharFeatures(fm_test_dna, DNA)
    subkernel = LocalAlignmentStringKernel(10)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)
    kernel.init(feats_train, feats_train)
    km_train = kernel.get_kernel_matrix()
    kernel.init(feats_train, feats_test)
    km_test = kernel.get_kernel_matrix()
    return km_train, km_test, kernel
Developer ID: coodoing, Project: shogun, Lines of code: 35, Source: kernel_combined_modular.py
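A hypothetical invocation of the example above with tiny synthetic inputs: real-valued train/test matrices plus equal-length DNA strings. The data is illustrative only and stands in for the module-level traindat/testdat/traindna/testdna arrays that the original file loads elsewhere.
import numpy
numpy.random.seed(3)
X_train = numpy.random.randn(2, 4)
X_test = numpy.random.randn(2, 3)
dna_train = ["ACGTACGT", "AACCGGTT", "GGGTTTAA", "ACGTACGA"]   # one string per train vector
dna_test = ["ACGTACGT", "TTTTCCCC", "CCCCAAAA"]                # one string per test vector
km_train, km_test, combined = kernel_combined_modular(X_train, X_test, dna_train, dna_test)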
Note: the shogun.Kernel.CombinedKernel class examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs and similar code/documentation platforms. The snippets were selected from open-source projects contributed by various developers, and the copyright of each snippet belongs to its original author. Please consult the corresponding project's license before redistributing or using the code; do not repost without permission.