• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python Classifier.SVMLight类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中shogun.Classifier.SVMLight的典型用法代码示例。如果您正苦于以下问题:Python SVMLight类的具体用法?Python SVMLight怎么用?Python SVMLight使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了SVMLight类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: classifier_domainadaptationsvm_modular

def classifier_domainadaptationsvm_modular(fm_train_dna=traindna, fm_test_dna=testdna, \
                                           label_train_dna=label_traindna, \
                                           label_test_dna=label_testdna, fm_train_dna2=traindna2, fm_test_dna2=testdna2, \
                                           label_train_dna2=label_traindna2, label_test_dna2=label_testdna2, C=1, degree=3):
    """Train a plain SVMLight on DNA strings, then build a
    DomainAdaptationSVM that is regularized against that previously
    obtained solution (regularization factor B = 1.0), and return the
    labels the DA-SVM predicts on the test features."""

    # --- step 1: train the source-domain SVM ---------------------------
    source_feats = StringCharFeatures(fm_train_dna, DNA)
    source_test_feats = StringCharFeatures(fm_test_dna, DNA)
    source_kernel = WeightedDegreeStringKernel(source_feats, source_feats, degree)
    source_labels = Labels(label_train_dna)

    presvm = SVMLight(C, source_kernel, source_labels)
    presvm.train()

    # --- step 2: obtain DA-SVM from the previously trained SVM ---------
    da_feats = StringCharFeatures(fm_train_dna, DNA)
    da_test_feats = StringCharFeatures(fm_test_dna, DNA)
    # NOTE(review): as in the original, this kernel is built on the first
    # feature objects (source_feats), not on da_feats — preserved as-is.
    da_kernel = WeightedDegreeStringKernel(source_feats, source_feats, degree)
    da_labels = Labels(label_train_dna)

    # we regularize against the previously obtained solution
    dasvm = DomainAdaptationSVM(C, da_kernel, da_labels, presvm, 1.0)
    dasvm.train()

    out = dasvm.apply(da_test_feats).get_labels()

    return out #,dasvm TODO
开发者ID:Anshul-Bansal,项目名称:gsoc,代码行数:32,代码来源:classifier_domainadaptationsvm_modular.py


示例2: classifier_svmlight_linear_term_modular

def classifier_svmlight_linear_term_modular(fm_train_dna=traindna, fm_test_dna=testdna, \
                                            label_train_dna=label_traindna, degree=3, \
                                            C=10, epsilon=1e-5, num_threads=1):
    """Train an SVMLight with a custom (negated) linear term on DNA
    string data and return (test predictions, kernel)."""
    from shogun.Features import StringCharFeatures, BinaryLabels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel
    from shogun.Classifier import SVMLight

    train_feats = StringCharFeatures(DNA)
    train_feats.set_features(fm_train_dna)
    test_feats = StringCharFeatures(DNA)
    test_feats.set_features(fm_test_dna)

    wdk = WeightedDegreeStringKernel(train_feats, train_feats, degree)
    lab = BinaryLabels(label_train_dna)

    classifier = SVMLight(C, wdk, lab)
    classifier.set_qpsize(3)
    # custom linear term; NOTE(review): length is hard-coded to 10 and
    # presumably matches the number of training examples — confirm
    classifier.set_linear_term(-numpy.array([1,2,3,4,5,6,7,8,7,6], dtype=numpy.double))
    classifier.set_epsilon(epsilon)
    classifier.parallel.set_num_threads(num_threads)
    classifier.train()

    # re-initialize the kernel on (train, test) before predicting
    wdk.init(train_feats, test_feats)
    out = classifier.apply().get_labels()
    return out, wdk
开发者ID:coodoing,项目名称:shogun,代码行数:27,代码来源:classifier_svmlight_linear_term_modular.py


示例3: svm_learn

def svm_learn(kernel, labels, options):
	"""train SVM using SVMLight or LibSVM

	Arguments:
	kernel -- kernel object from Shogun toolbox
	labels -- list of labels
	options -- object containing option data (svmC, quiet, epsilon, weight)

	Return:
	trained svm object
	"""

	y = Labels(numpy.array(labels, dtype=numpy.double))
	try:
		svm = SVMLight(options.svmC, kernel, y)
	except NameError:
		# SVMLight not compiled into this shogun build -- fall back
		svm = LibSVM(options.svmC, kernel, y)

	# verbose logging to stderr unless the caller asked for quiet mode
	if options.quiet == False:
		svm.io.set_loglevel(MSG_INFO)
		svm.io.set_target_to_stderr()

	svm.set_epsilon(options.epsilon)
	svm.parallel.set_num_threads(1)

	# class-specific cost: scale the positive-class C by the weight
	if options.weight != 1.0:
		svm.set_C(options.svmC, options.svmC * options.weight)

	svm.train()

	if options.quiet == False:
		svm.io.set_loglevel(MSG_ERROR)

	return svm
开发者ID:aleasoni,项目名称:Summer-Research-2013,代码行数:31,代码来源:kmersvm_train.py


示例4: ShogunPredictor

class ShogunPredictor(object):
    """
    basic single-task promoter model using string kernels
    """

    def __init__(self, degree=4, shifts=32, kernel_cache=10000, cost=1.0):
        #TODO: clean up degree
        self.degree = degree
        self.degree_wdk = degree
        self.degree_spectrum = degree
        self.shifts = shifts
        self.kernel_cache = kernel_cache
        self.cost = cost
        self.center_offset = 50
        self.center_pos = 1200
        self.epsilon = 10e-2
        self.num_threads = 4


    def train(self, data, labels):

        kernel = create_promoter_kernel(data, self.center_offset, self.center_pos, self.degree_wdk, self.degree_spectrum, self.shifts, kernel_cache=self.kernel_cache)

        print "len(labels) = %i" % (len(labels))
        lab = create_labels(labels)
        self.svm = SVMLight(self.cost, kernel, lab)

        # show debugging output
        self.svm.io.enable_progress()
        self.svm.io.set_loglevel(MSG_DEBUG)

        # optimization settings
        num_threads = self.num_threads
        self.svm.parallel.set_num_threads(num_threads)
        self.svm.set_epsilon(self.epsilon)

        self.svm.train()

        return self


    def predict(self, data):

        feat = create_promoter_features(data, self.center_offset, self.center_pos)
        out = self.svm.apply(feat).get_values()

        return out
开发者ID:kuod,项目名称:genomeutils,代码行数:47,代码来源:model.py


示例5: svm_light

def svm_light ():
	print 'SVMLight'

	from shogun.Features import StringCharFeatures, Labels, DNA
	from shogun.Kernel import WeightedDegreeStringKernel
	try:
		from shogun.Classifier import SVMLight
	except ImportError:
		print 'No support for SVMLight available.'
		return

	feats_train=StringCharFeatures(DNA)
	feats_train.set_features(fm_train_dna)
	feats_test=StringCharFeatures(DNA)
	feats_test.set_features(fm_test_dna)
	degree=20

	kernel=WeightedDegreeStringKernel(feats_train, feats_train, degree)

	C=1.2
	epsilon=1e-5
	num_threads=1
	labels=Labels(label_train_dna)

	svm=SVMLight(C, kernel, labels)
	svm.set_epsilon(epsilon)
	svm.parallel.set_num_threads(num_threads)
	svm.train()

	kernel.init(feats_train, feats_test)
	svm.classify().get_labels()
开发者ID:memimo,项目名称:shogun-liblinear,代码行数:31,代码来源:classifier_svmlight_modular.py


示例6: classifier_svmlight_modular

def classifier_svmlight_modular (fm_train_dna=traindat,fm_test_dna=testdat,label_train_dna=label_traindat,C=1.2,epsilon=1e-5,num_threads=1):
	from shogun.Features import StringCharFeatures, Labels, DNA
	from shogun.Kernel import WeightedDegreeStringKernel
	try:
		from shogun.Classifier import SVMLight
	except ImportError:
		print 'No support for SVMLight available.'
		return

	feats_train=StringCharFeatures(DNA)
	feats_train.set_features(fm_train_dna)
	feats_test=StringCharFeatures(DNA)
	feats_test.set_features(fm_test_dna)
	degree=20

	kernel=WeightedDegreeStringKernel(feats_train, feats_train, degree)

	labels=Labels(label_train_dna)

	svm=SVMLight(C, kernel, labels)
	svm.set_epsilon(epsilon)
	svm.parallel.set_num_threads(num_threads)
	svm.train()

	kernel.init(feats_train, feats_test)
	svm.apply().get_labels()
	return kernel
开发者ID:Anshul-Bansal,项目名称:gsoc,代码行数:27,代码来源:classifier_svmlight_modular.py


示例7: _train_single_svm

    def _train_single_svm(self, param, kernel, lab):
        """Train one SVMLight on the given kernel and labels.

        param supplies the cost; linadd and batch computation are
        switched off, matching the sibling training methods.
        """
        kernel.set_cache_size(500)
        classifier = SVMLight(param.cost, kernel, lab)

        # verbose progress/debug output
        classifier.io.enable_progress()
        classifier.io.set_loglevel(shogun.Classifier.MSG_DEBUG)

        classifier.parallel.set_num_threads(8)
        classifier.set_linadd_enabled(False)
        classifier.set_batch_computation_enabled(False)

        classifier.train()

        return classifier
开发者ID:cwidmer,项目名称:multitask,代码行数:28,代码来源:method_mhc_boosting.py


示例8: train

    def train(self, data, labels):
        """
        model training

        Assembles a combined kernel — a centered WDK (or WDK-shift when
        shifts are requested) plus two boundary spectrum kernels — trains
        an SVMLight on it and stores the classifier in self.svm.

        @param data: training sequences (passed to create_features)
        @param labels: training labels, converted to BinaryLabels
        @return: self
        """

        # centered WDK/WDK-shift: plain weighted-degree kernel when no
        # shifts are configured, otherwise a positional WDK with a
        # constant shift vector over the 2*center_offset window
        if self.param["shifts"] == 0:
            kernel_center = WeightedDegreeStringKernel(self.param["degree"])
        else:
            kernel_center = WeightedDegreePositionStringKernel(10, self.param["degree"])
            shifts_vector = numpy.ones(self.param["center_offset"]*2, dtype=numpy.int32)*self.param["shifts"]
            kernel_center.set_shifts(shifts_vector)

        # split the kernel cache budget across the three sub-kernels
        kernel_center.set_cache_size(self.param["kernel_cache"]/3)

        # border spectrum kernels (left/right flanking regions)
        size = self.param["kernel_cache"]/3
        use_sign = False
        kernel_left = WeightedCommWordStringKernel(size, use_sign)
        kernel_right = WeightedCommWordStringKernel(size, use_sign)

        # assemble combined kernel; order presumably matches the feature
        # blocks produced by create_features -- TODO confirm
        kernel = CombinedKernel()
        kernel.append_kernel(kernel_center)
        kernel.append_kernel(kernel_left)
        kernel.append_kernel(kernel_right)

        ## building features
        feat = create_features(data, self.param["center_offset"], self.param["center_pos"])

        # init combined kernel on the training features
        kernel.init(feat, feat)

        print "len(labels) = %i" % (len(labels))
        lab = BinaryLabels(numpy.double(labels))
        self.svm = SVMLight(self.param["cost"], kernel, lab)

        # show debugging output
        self.svm.io.enable_progress()
        self.svm.io.set_loglevel(MSG_DEBUG)

        # optimization settings
        num_threads = 2
        self.svm.parallel.set_num_threads(num_threads)
        self.svm.set_epsilon(10e-8)

        self.svm.train()

        return self
开发者ID:kuod,项目名称:genomeutils,代码行数:49,代码来源:promoter_kernel.py


示例9: train

    def train(self, data, labels):

        kernel = create_promoter_kernel(data, self.center_offset, self.center_pos, self.degree_wdk, self.degree_spectrum, self.shifts, kernel_cache=self.kernel_cache)

        print "len(labels) = %i" % (len(labels))
        lab = create_labels(labels)
        self.svm = SVMLight(self.cost, kernel, lab)

        # show debugging output
        self.svm.io.enable_progress()
        self.svm.io.set_loglevel(MSG_DEBUG)

        # optimization settings
        num_threads = self.num_threads
        self.svm.parallel.set_num_threads(num_threads)
        self.svm.set_epsilon(self.epsilon)

        self.svm.train()

        return self
开发者ID:kuod,项目名称:genomeutils,代码行数:20,代码来源:model.py


示例10: svm_learn

def svm_learn(kernel, labels, svmC, epsilon, weight):
	"""
	"""
	try: 
		svm=SVMLight(svmC, kernel, Labels(numpy.array(labels, dtype=numpy.double)))
	except NameError:
		print 'No support for SVMLight available.'
		return

	svm.io.set_loglevel(MSG_INFO)
	svm.io.set_target_to_stderr()

	svm.set_epsilon(epsilon)
	svm.parallel.set_num_threads(1)
	if weight != 1.0:
		svm.set_C(svmC, svmC*weight)
	svm.train()
	svm.io.set_loglevel(MSG_ERROR)

	return svm
开发者ID:aleasoni,项目名称:Summer-Research-2013,代码行数:20,代码来源:cksvmcv2.py


示例11: serialization_svmlight_modular

def serialization_svmlight_modular(num, dist, width, C):
    from shogun.IO import MSG_DEBUG
    from shogun.Features import RealFeatures, BinaryLabels, DNA, Alphabet
    from shogun.Kernel import WeightedDegreeStringKernel, GaussianKernel
    from shogun.Classifier import SVMLight
    from numpy import concatenate, ones
    from numpy.random import randn, seed

    import sys
    import types
    import random
    import bz2

    try:
        import cPickle as pickle
    except ImportError:
        import pickle as pickle
    import inspect

    def save(filename, myobj):
        """
        save object to file using pickle

        @param filename: name of destination file
        @type filename: str
        @param myobj: object to save (has to be pickleable)
        @type myobj: obj
        """

        try:
            f = bz2.BZ2File(filename, "wb")
        except IOError as details:
            sys.stderr.write("File " + filename + " cannot be written\n")
            sys.stderr.write(details)
            return

        pickle.dump(myobj, f, protocol=2)
        f.close()

    def load(filename):
        """
        Load from filename using pickle

        @param filename: name of file to load from
        @type filename: str
        """

        try:
            f = bz2.BZ2File(filename, "rb")
        except IOError as details:
            sys.stderr.write("File " + filename + " cannot be read\n")
            sys.stderr.write(details)
            return

        myobj = pickle.load(f)
        f.close()
        return myobj

    ##################################################
    # set up toy data and svm

    traindata_real = concatenate((randn(2, num) - dist, randn(2, num) + dist), axis=1)
    testdata_real = concatenate((randn(2, num) - dist, randn(2, num) + dist), axis=1)

    trainlab = concatenate((-ones(num), ones(num)))
    testlab = concatenate((-ones(num), ones(num)))

    feats_train = RealFeatures(traindata_real)
    feats_test = RealFeatures(testdata_real)
    kernel = GaussianKernel(feats_train, feats_train, width)
    # kernel.io.set_loglevel(MSG_DEBUG)

    labels = BinaryLabels(trainlab)

    svm = SVMLight(C, kernel, labels)
    svm.train()
    # svm.io.set_loglevel(MSG_DEBUG)

    ##################################################
    # serialize to file

    fn = "serialized_svm.bz2"
    # print("serializing SVM to file", fn)
    save(fn, svm)

    ##################################################
    # unserialize and sanity check

    # print("unserializing SVM")
    svm2 = load(fn)

    # print("comparing objectives")

    svm2.train()

    # print("objective before serialization:", svm.get_objective())
    # print("objective after serialization:", svm2.get_objective())

    # print("comparing predictions")

#.........这里部分代码省略.........
开发者ID:behollis,项目名称:muViewBranch,代码行数:101,代码来源:serialization_svmlight_modular.py


示例12: _train

    def _train(self, train_data, param):
        """
        training procedure using training examples and labels
        
        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        """


        assert(param.base_similarity >= 1)
        
        # merge data sets
        data = PreparedMultitaskData(train_data, shuffle=False)
        
        
        # create shogun data objects
        base_wdk = shogun_factory.create_kernel(data.examples, param)
        lab = shogun_factory.create_labels(data.labels)

        # set normalizer
        normalizer = MultitaskKernelNormalizer(data.task_vector_nums)
        
        # load data
        #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_pearson.txt")
        f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/All_PseudoSeq_Hamming.txt")
        #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_PseudoSeq_BlosumEnc_euklid.txt")
        #f = file("/fml/ag-raetsch/home/cwidmer/Documents/phd/projects/multitask/data/mhc/MHC_Distanzen/MHC_Distanzen/ALL_RAxML.txt")
        
        num_lines = int(f.readline().strip())
        task_distances = numpy.zeros((num_lines, num_lines))
        name_to_id = {}
        for (i, line) in enumerate(f):
            tokens = line.strip().split("\t")
            name = str(tokens[0])
            name_to_id[name] = i
            entry = numpy.array([v for (j,v) in enumerate(tokens) if j!=0])
            assert len(entry)==num_lines, "len_entry %i, num_lines %i" % (len(entry), num_lines)
            task_distances[i,:] = entry
            
        
        # cut relevant submatrix
        active_ids = [name_to_id[name] for name in data.get_task_names()] 
        tmp_distances = task_distances[active_ids, :]
        tmp_distances = tmp_distances[:, active_ids]
        print "distances ", tmp_distances.shape

        
        # normalize distances
        task_distances = task_distances / numpy.max(tmp_distances)
        
        
        similarities = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))
                                
        
        # convert distance to similarity
        for task_name_lhs in data.get_task_names():
            for task_name_rhs in data.get_task_names():
                
                
                # convert similarity with simple transformation
                similarity = param.base_similarity - task_distances[name_to_id[task_name_lhs], name_to_id[task_name_rhs]]
                normalizer.set_task_similarity(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs), similarity)
                
                # save for later
                similarities[data.name_to_id(task_name_lhs),data.name_to_id(task_name_rhs)] = similarity
                
                
        # set normalizer                
        base_wdk.set_normalizer(normalizer)
        base_wdk.init_normalizer()
        

        # set up svm
        svm = SVMLight(param.cost, base_wdk, lab)
        svm.set_linadd_enabled(False)
        svm.set_batch_computation_enabled(False)
        
        
        # normalize cost
        norm_c_pos = param.cost / float(len([l for l in data.labels if l==1]))
        norm_c_neg = param.cost / float(len([l for l in data.labels if l==-1]))
        
        svm.set_C(norm_c_neg, norm_c_pos)
        
        
        # start training
        svm.train()


        # save additional information
        self.additional_information["svm objective"] = svm.get_objective()
        self.additional_information["num sv"] = svm.get_num_support_vectors()
        #self.additional_information["distances"] = distances
        self.additional_information["similarities"] = similarities


        # wrap up predictors
        svms = {}
#.........这里部分代码省略.........
开发者ID:cwidmer,项目名称:multitask,代码行数:101,代码来源:method_mhc_simple.py


示例13: SVMLight

            
##################################################################
# Train SVMs
##################################################################
# Script fragment: trains an SVMLight using a tree-based multitask
# kernel normalizer, then rebuilds a kernel with a second normalizer.
# NOTE(review): relies on names defined earlier in the full script
# (data, param, cost, lab, tree_normalizer, normalizer) -- not shown here.

# create shogun objects
wdk_tree = shogun_factory.create_kernel(data.examples, param)
lab = shogun_factory.create_labels(data.labels)

wdk_tree.set_normalizer(tree_normalizer)
wdk_tree.init_normalizer()

print "--->",wdk_tree.get_normalizer().get_name()

svm_tree = SVMLight(cost, wdk_tree, lab)
# linadd and batch computation disabled
svm_tree.set_linadd_enabled(False)
svm_tree.set_batch_computation_enabled(False)

svm_tree.train()

# release the tree kernel/normalizer before building the second kernel
del wdk_tree
del tree_normalizer

print "finished training tree-norm SVM:", svm_tree.get_objective()


# second kernel with the plain normalizer
wdk = shogun_factory.create_kernel(data.examples, param)
wdk.set_normalizer(normalizer)
wdk.init_normalizer()
开发者ID:cwidmer,项目名称:multitask,代码行数:28,代码来源:debug_multitask_kernel_tree.py


示例14: test_data


#.........这里部分代码省略.........
    taxonomy = shogun_factory.create_taxonomy(mss.taxonomy.data)
    
    
    support = numpy.linspace(0, 100, 4)
    
    
    distances = [[0, 1, 2, 2], [1, 0, 2, 2], [2, 2, 0, 1], [2, 2, 1, 0]]
    
    # create tree normalizer 
    tree_normalizer = MultitaskKernelPlifNormalizer(support, data.task_vector_names)
    
    
    
    
    task_names = data.get_task_names()
    
    
    FACTOR = 1.0
    
    
    # init gamma matrix
    gammas = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))
    
    for t1_name in task_names:
        for t2_name in task_names:
            
            similarity = taxonomy.compute_node_similarity(taxonomy.get_id(t1_name), taxonomy.get_id(t2_name))        
            gammas[data.name_to_id(t1_name), data.name_to_id(t2_name)] = similarity
    
    helper.save("/tmp/gammas", gammas)
    
    
    gammas = gammas * FACTOR
    
    cost = param.cost * numpy.sqrt(FACTOR) 
    
    print gammas
    
    
    ##########
    # regular normalizer
    
    normalizer = MultitaskKernelNormalizer(data.task_vector_nums)
    
    for t1_name in task_names:
        for t2_name in task_names:
                    
            similarity = gammas[data.name_to_id(t1_name), data.name_to_id(t2_name)]
            normalizer.set_task_similarity(data.name_to_id(t1_name), data.name_to_id(t2_name), similarity)
    
                
    ##################################################################
    # Train SVMs
    ##################################################################
    
    # create shogun objects
    wdk_tree = shogun_factory.create_kernel(data.examples, param)
    lab = shogun_factory.create_labels(data.labels)
    
    wdk_tree.set_normalizer(tree_normalizer)
    wdk_tree.init_normalizer()
    
    print "--->",wdk_tree.get_normalizer().get_name()
    
    svm_tree = SVMLight(cost, wdk_tree, lab)
    svm_tree.set_linadd_enabled(False)
    svm_tree.set_batch_computation_enabled(False)
    
    svm_tree.train()
    
    del wdk_tree
    del tree_normalizer
    
    print "finished training tree-norm SVM:", svm_tree.get_objective()
    
    
    wdk = shogun_factory.create_kernel(data.examples, param)
    wdk.set_normalizer(normalizer)
    wdk.init_normalizer()
    
    print "--->",wdk.get_normalizer().get_name()
    
    svm = SVMLight(cost, wdk, lab)
    svm.set_linadd_enabled(False)
    svm.set_batch_computation_enabled(False)
    
    svm.train()
    
    print "finished training manually set SVM:", svm.get_objective()
    
    
    alphas_tree = svm_tree.get_alphas()
    alphas = svm.get_alphas()
    
    assert(len(alphas_tree)==len(alphas))
    
    for i in xrange(len(alphas)):
        assert(abs(alphas_tree[i] - alphas[i]) < 0.0001)
        
    print "success: all alphas are the same"
开发者ID:cwidmer,项目名称:multitask,代码行数:101,代码来源:debug_multitask_kernel_plif.py


示例15: do_batch_linadd

def do_batch_linadd ():
	print 'SVMlight batch'

	from shogun.Features import StringCharFeatures, Labels, DNA
	from shogun.Kernel import WeightedDegreeStringKernel
	try:
		from shogun.Classifier import SVMLight
	except ImportError:
		print 'No support for SVMLight available.'
		return

	feats_train=StringCharFeatures(DNA)
	feats_train.set_features(fm_train_dna)
	feats_test=StringCharFeatures(DNA)
	feats_test.set_features(fm_test_dna)
	degree=20

	kernel=WeightedDegreeStringKernel(feats_train, feats_train, degree)

	C=1
	epsilon=1e-5
	num_threads=2
	labels=Labels(label_train_dna)

	svm=SVMLight(C, kernel, labels)
	svm.set_epsilon(epsilon)
	svm.parallel.set_num_threads(num_threads)
	svm.train()

	kernel.init(feats_train, feats_test)

	#print 'SVMLight Objective: %f num_sv: %d' % \
	#	(svm.get_objective(), svm.get_num_support_vectors())
	svm.set_batch_computation_enabled(False)
	svm.set_linadd_enabled(False)
	svm.classify().get_labels()

	svm.set_batch_computation_enabled(True)
	svm.classify().get_labels()
开发者ID:memimo,项目名称:shogun-liblinear,代码行数:39,代码来源:classifier_svmlight_batch_linadd_modular.py


示例16: _inner_train

    def _inner_train(self, train_data, param):
        """
        perform inner training by processing the tree
        """

        data_keys = []
        # top-down processing of taxonomy


        classifiers = []
        classifier_at_node = {}

        root = param.taxonomy.data

        grey_nodes = [root]
        
        while len(grey_nodes)>0:
           
            node = grey_nodes.pop(0) # pop first item
            
            # enqueue children
            if node.children != None:
                grey_nodes.extend(node.children)
    

    
            #####################################################
            #     init data structures
            #####################################################

            # get data below current node
            data = [train_data[key] for key in node.get_data_keys()]
            
            data_keys.append(node.get_data_keys())
    
            print "data at current level"
            for instance_set in data:        
                print instance_set[0].dataset
            
            
            # initialize containers
            examples = []
            labels = []       
    

            # concatenate data
            for instance_set in data:
      
                print "train split_set:", instance_set[0].dataset.organism
                
                for inst in instance_set:
                    examples.append(inst.example)
                    labels.append(inst.label)
    

            # create shogun data objects
            k = shogun_factory.create_kernel(examples, param)
            lab = shogun_factory.create_labels(labels)


            #####################################################
            #    train weak learners    
            #####################################################
            
            cost = param.cost
            
            # set up svm
            svm = SVMLight(cost, k, lab)
                        
            if param.flags["normalize_cost"]:
                # set class-specific Cs
                norm_c_pos = param.cost / float(len([l for l in labels if l==1]))
                norm_c_neg = param.cost / float(len([l for l in labels if l==-1]))
                svm.set_C(norm_c_neg, norm_c_pos)
            
            
            print "using cost: negative class=%f, positive class=%f" % (norm_c_neg, norm_c_pos) 
            
            # enable output
            svm.io.enable_progress()
            svm.io.set_loglevel(shogun.Classifier.MSG_INFO)
            
            # train
            svm.train()
            
            # append svm object
            classifiers.append(svm)
            classifier_at_node[node.name] = svm                            
            
            # save some information
            self.additional_information[node.name + " svm obj"] = svm.get_objective()
            self.additional_information[node.name + " svm num sv"] = svm.get_num_support_vectors()
            self.additional_information[node.name + " runtime"] = svm.get_runtime()


        return (classifiers, classifier_at_node)
开发者ID:cwidmer,项目名称:multitask,代码行数:96,代码来源:method_hierarchy_boosting.py


示例17: classifier_svmlight_batch_linadd_modular

def classifier_svmlight_batch_linadd_modular(fm_train_dna, fm_test_dna,
        label_train_dna, degree, C, epsilon, num_threads):
    """Train an SVMLight with a weighted-degree string kernel on DNA
    strings and classify the test set twice: once with batch
    computation/linadd disabled, once with batch computation enabled.

    @param degree: order of the weighted-degree kernel (previously this
        argument was ignored because it was unconditionally overwritten
        with 20; it is now honoured)
    @return: (labels from the batch run, trained svm), or None when
        shogun was built without SVMLight support
    """
    from shogun.Features import StringCharFeatures, BinaryLabels, DNA
    from shogun.Kernel import WeightedDegreeStringKernel, MSG_DEBUG
    try:
        from shogun.Classifier import SVMLight
    except ImportError:
        print('No support for SVMLight available.')
        return

    feats_train = StringCharFeatures(DNA)
    feats_train.set_features(fm_train_dna)
    feats_test = StringCharFeatures(DNA)
    feats_test.set_features(fm_test_dna)

    # BUGFIX: use the degree parameter instead of clobbering it with 20
    kernel = WeightedDegreeStringKernel(feats_train, feats_train, degree)

    labels = BinaryLabels(label_train_dna)

    svm = SVMLight(C, kernel, labels)
    svm.set_epsilon(epsilon)
    svm.parallel.set_num_threads(num_threads)
    svm.train()

    kernel.init(feats_train, feats_test)

    # first pass: batch computation and linadd disabled
    svm.set_batch_computation_enabled(False)
    svm.set_linadd_enabled(False)
    svm.apply().get_labels()

    # second pass: batch computation enabled
    svm.set_batch_computation_enabled(True)
    labels = svm.apply().get_labels()
    return labels, svm
开发者ID:behollis,项目名称:muViewBranch,代码行数:38,代码来源:classifier_svmlight_batch_linadd_modular.py


示例18: _train

    def _train(self, train_data, param):
        """
        training procedure using training examples and labels

        Merges the per-task data, builds a multitask kernel normalizer
        whose task-pair similarities come from an RBF over task
        sequences, trains one SVMLight on the combined kernel and hands
        the same classifier to every task.

        @param train_data: Data relevant to SVM training
        @type train_data: dict<str, list<instances> >
        @param param: Parameters for the training procedure
        @type param: ParameterSvm
        @return: dict mapping task name to (task_num, param, svm)
        """

        # merge data sets
        data = PreparedMultitaskData(train_data, shuffle=False)

        # create shogun data objects
        base_wdk = shogun_factory.create_kernel(data.examples, param)
        lab = shogun_factory.create_labels(data.labels)

        # set normalizer
        normalizer = MultitaskKernelNormalizer(data.task_vector_nums)

        ########################################################
        print "creating a kernel for each node:"
        ########################################################

        # init seq handler: RBF similarity between task sequences
        task_kernel = SequencesHandlerRbf(1, param.base_similarity, data.get_task_names(), param.flags["wdk_rbf_on"])
        similarities = numpy.zeros((data.get_num_tasks(), data.get_num_tasks()))

        # fill the normalizer with pairwise task similarities
        for task_name_lhs in data.get_task_names():
            for task_name_rhs in data.get_task_names():

                # convert similarity with simple transformation
                similarity = task_kernel.get_similarity(task_name_lhs, task_name_rhs)

                print similarity

                print "similarity (%s,%s)=%f" % (task_name_lhs, task_name_rhs, similarity)

                normalizer.set_task_similarity(data.name_to_id(task_name_lhs), data.name_to_id(task_name_rhs), similarity)

                # save for later (returned via additional_information)
                similarities[data.name_to_id(task_name_lhs),data.name_to_id(task_name_rhs)] = similarity

        # set normalizer
        base_wdk.set_normalizer(normalizer)
        base_wdk.init_normalizer()

        # set up svm; linadd/batch computation disabled
        svm = SVMLight(param.cost, base_wdk, lab)
        svm.set_linadd_enabled(False)
        svm.set_batch_computation_enabled(False)

        # normalize cost: class-specific C scaled by inverse class size
        norm_c_pos = param.cost / float(len([l for l in data.labels if l==1]))
        norm_c_neg = param.cost / float(len([l for l in data.labels if l==-1]))

        svm.set_C(norm_c_neg, norm_c_pos)

        # start training
        svm.train()

        # save additional information
        self.additional_information["svm objective"] = svm.get_objective()
        self.additional_information["num sv"] = svm.get_num_support_vectors()
        #self.additional_information["distances"] = distances
        self.additional_information["similarities"] = similarities

        # wrap up predictors
        svms = {}

        # use a reference to the same svm several times
        for task_name in data.get_task_names():

            task_num = data.name_to_id(task_name)

            # save svm and task_num
            svms[task_name] = (task_num, param, svm)

        return svms
开发者ID:cwidmer,项目名称:multitask,代码行数:92,代码来源:method_mhc_rbf.py


示例19: serialization_svmlight_modular

def serialization_svmlight_modular(num, dist, width, C):
	from shogun.IO import MSG_DEBUG
	from shogun.Features import RealFeatures, BinaryLabels, DNA, Alphabet
	from shogun.Kernel import WeightedDegreeStringKernel, GaussianKernel
	from shogun.Classifier import SVMLight
	from numpy import concatenate, ones
	from numpy.random import randn, seed

	import sys
	import types
	import random
	import bz2
	try:
		import cPickle as pickle
	except ImportError:
		import pickle as pickle	
	import inspect


	def save(filename, myobj):
		"""
		save object to file using pickle
		
		@param filename: name of destination file
		@type filename: str
		@param myobj: object to save (has to be pickleable)
		@type myobj: obj
		"""

		try:
			f = bz2.BZ2File(filename, 'wb')
		except IOError as details:
			sys.stderr.write('File ' + filename + ' cannot be written\n')
			sys.stderr.write(details)
			return

		pickle.dump(myobj, f, protocol=2)
		f.close()



	def load(filename):
		"""
		Load from filename using pickle
		
		@param filename: name of file to load from
		@type filename: str
		"""
		
		try:
			f = bz2.BZ2File(filename, 'rb')
		except IOError as details:
			sys.stderr.write('File ' + filename + ' cannot be read\n')
			sys.stderr.write(details)
			return

		myobj = pickle.load(f)
		f.close()
		return myobj


	##################################################

	seed(17)
	traindata_real=concatenate((randn(2,num)-dist, randn(2,num)+dist), axis=1)
	testdata_real=concatenate((randn(2,num)-dist, randn(2,num)+dist), axis=1);

	trainlab=concatenate((-ones(num), ones(num)));
	testlab=concatenate((-ones(num), ones(num)));

	feats_train=RealFeatures(traindata_real);
	feats_test=RealFeatures(testdata_real);
	kernel=GaussianKernel(feats_train, feats_train, width);
	#kernel.io.set_loglevel(MSG_DEBUG)

	labels=BinaryLabels(trainlab);

	svm=SVMLight(C, kernel, labels)
	svm.train()
	#svm.io.set_loglevel(MSG_DEBUG)

	##################################################

	#print("labels:")
	#print(pickle.dumps(labels))
	#
	#print("features")
	#print(pickle.dumps(feats_train))
	#
	#print("kernel")
	#print(pickle.dumps(kernel))
	#
	#print("svm")
	#print(pickle.dumps(svm))
	#
	#print("#################################")

	fn = "serialized_svm.bz2"
	#print("serializing SVM to file", fn)

#.........这里部分代码省略.........
开发者ID:coodoing,项目名称:shogun,代码行数:101,代码来源:serialization_svmlight_modular.py


示例20: xrange

# Script fragment: sets pairwise similarities (4.0 on the diagonal,
# 1.0 off-diagonal) on a 2-task multitask normalizer, trains an
# SVMLight on the normalized kernel, then starts building a plain WDK.
# NOTE(review): relies on names defined earlier in the full script
# (normalizer, base_wdk, lab, feat) -- not shown here.
for i in xrange(2):
    for j in xrange(2):

        if i==j:
            normalizer.set_task_similarity(i,j, 4.0)
        else:
            normalizer.set_task_similarity(i,j, 1.0)


base_wdk.set_normalizer(normalizer)

print base_wdk.get_kernel_matrix()
print "--->",base_wdk.get_normalizer().get_name()

svm = SVMLight(1, base_wdk, lab)
svm.set_linadd_enabled(False)
svm.set_batch_computation_enabled(False)

# NOTE(review): train() is called with the feature object here, unlike
# the other examples in this file -- confirm against the shogun API
svm.train(feat)

print "interally modified kernel. objective:", svm.get_objective()



##################################################################
# regular SVM
##################################################################


wdk = WeightedDegreeStringKernel(feat, feat, 1)
开发者ID:cwidmer,项目名称:multitask,代码行数:30,代码来源:debug_multitask_kernel.py



注:本文中的shogun.Classifier.SVMLight类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python Features.CombinedFeatures类代码示例发布时间:2022-05-27
下一篇:
Python Classifier.LibSVM类代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap