
Python svm.svm_problem Function Code Examples


This article collects typical usage examples of the svm.svm_problem function in Python. If you have been wondering what exactly svm_problem does, how to call it, or what real-world uses look like, the curated examples below should help.



Twenty code examples of the svm_problem function are shown below, ordered by popularity by default.
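
Before the project-specific examples, here is a minimal sketch of the basic call pattern. It assumes the svmutil-style bindings shipped with recent LIBSVM releases (module layout varies between versions), and the toy labels and feature vectors are made up purely for illustration:

# Minimal sketch; assumes `svmutil` from a recent LIBSVM release is importable.
from svmutil import svm_problem, svm_parameter, svm_train, svm_predict

# Toy data: two sparse feature vectors (index -> value dicts) with labels.
y = [1, -1]
x = [{1: 1, 2: 1}, {1: -1, 2: -1}]

prob = svm_problem(y, x)                 # labels first, then feature vectors
param = svm_parameter('-t 0 -c 4 -b 1')  # linear kernel, C=4, probability estimates
model = svm_train(prob, param)

# Predict on the training points, just to show the call.
p_labels, p_acc, p_vals = svm_predict(y, x, model)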

Example 1: do_one_cv_classify_predeffolds_valid

def do_one_cv_classify_predeffolds_valid(theinput):
	c = theinput[0]
	gamma = theinput[1]
	nf = theinput[2]
	output = theinput[3]
	input = theinput[4]
	output_valid = theinput[5]
	input_valid = theinput[6]
	useprob = theinput[7]
	fold_start = theinput[8]
	fold_start_valid = theinput[9]
	perfmetric = theinput[10]
	
	param = svm.svm_parameter('-c %g -g %g -b %d' % (c,gamma,int(useprob)))

	prob = svm.svm_problem(output, input)
	fold_start_p = (c_int *len(fold_start))()
	for i in xrange(len(fold_start)):
		fold_start_p[i] = fold_start[i]
		
	prob_valid = svm.svm_problem(output_valid, input_valid)
	fold_start_p_valid = (c_int *len(fold_start_valid))()
	for i in xrange(len(fold_start_valid)):
		fold_start_p_valid[i] = fold_start_valid[i]


	target = (c_double * prob_valid.l)()
	posclass = output[0]
	
#	print prob
	libsvm.svm_cross_validation_sepsets(prob, prob_valid,fold_start_p, fold_start_p_valid,param, nf, target)

	
	ys = prob.y[:prob_valid.l]
	db = array([[ys[i],target[i]] for i in range(prob_valid.l)])
#	print db
	del target
	del fold_start_p
	del fold_start_p_valid
	
	neg = len([x for x in ys if x != posclass])
#	print neg
	pos = prob_valid.l-neg
#	print pos
		
#	print fb,neg,pos,posclass,perfmetric
	
	[topacc,topphi,minfpfnratio,topf1,auc,optbias] = optimize_results(db,neg,pos,posclass,perfmetric)
		
	return topacc,topphi,minfpfnratio,topf1,auc,optbias
Developer: MD2Korg | Project: memphis-dataprocessingframework | Lines: 50 | Source: mygrid.py


Example 2: build_problem

def build_problem(img_kind, subdir = "data/"):

	classes = []
	data = []

	the_ones = glob.glob(subdir + "f_" + img_kind + "*.jpg")
	all_of_them = glob.glob(subdir + "f_*_*.jpg")
	the_others = []

	for x in all_of_them:
		if the_ones.count(x) < 1:
			the_others.append(x)
	
	for x in the_ones:
		classes.append(1)
		data.append(get_image_features(cv.LoadImageM(x), True, img_kind))
	
	for x in the_others:
		classes.append(-1)
		data.append(get_image_features(cv.LoadImageM(x), True, img_kind))

	prob = svm.svm_problem(classes, data)

	return prob
Developer: prabhat1992 | Project: emotion_recognition | Lines: 25 | Source: utils+-+Copy.py


Example 3: svm

def svm(y,K,**param_kw):
    """
    Solve the SVM problem. Return ``(alpha, b)``

    `y`
      labels
    `K`
      precomputed kernel matrix

    Additional keyword arguments are passed on as svm parameters to
    the model.

    The wrapper is needed to precondition the precomputed matrix for
    use with libsvm, and to extract the model parameters and convert
    them into the canonical weight vector plus scalar offset. Normally
    libsvm hides these model parameters, preferring instead to provide
    a high-level model object that can be queried for results.

    """
    i = arange(1,len(K)+1).reshape((-1,1))
    X = hstack((i, K))
    y = asarray(y,dtype=double)
    X = asarray(X,dtype=double)
    prob = svm_problem(y,X)
    param = svm_parameter(kernel_type=PRECOMPUTED,**param_kw)
    model = svm_model(prob, param)
    return get_alpha_b(model)
Developer: alanfalloon | Project: libsvm-2.88_output_model_params | Lines: 27 | Source: svm_raw.py


Example 4: train

    def train(self,labels,data):
        '''
        Train the classifier.
        
        @param labels: A list of class labels.
        @param data: A 2D array or list of feature vectors.  One feature vector per row.
        '''
        
        # Check the types and convert to np arrays
        if isinstance(data,list) or isinstance(data,tuple):
            data = np.array(data,dtype=np.double)
            

        labels = np.array(labels,dtype=np.double)
            
        # Preprocess the data    
        labels,data = self._preprocessor.train(labels,data)
        labels,data = self._label_scale.train(labels,data)
        
        
        # Create the svm parameter data and problem description
        param = svm.svm_parameter(svm_type=svm.EPSILON_SVR,kernel_type = svm.RBF, p = self._epsilon, gamma=self._gamma)
        prob = svm.svm_problem(labels.tolist(),data.tolist())
        
        # train the svm
        self._model = svm.svm_model(prob, param)
Developer: Pictobar | Project: pyvision | Lines: 26 | Source: libsvm.py


Example 5: do_one_cv_classify_predeffolds_multi

def do_one_cv_classify_predeffolds_multi(theinput):
	c = theinput[0]
	gamma = theinput[1]
	nf = theinput[2]
	output = theinput[3]
	input = theinput[4]
	useprob = theinput[5]
	fold_start = theinput[6]
			
		
		
	param = svm.svm_parameter('-c %g -g %g -b %d' % (c,gamma,int(useprob)))
	
	prob = svm.svm_problem(output, input)
	target = (c_double * prob.l)()
	posclass = output[0]
	fold_start_p = (c_int *len(fold_start))()
	for i in xrange(len(fold_start)):
		fold_start_p[i] = fold_start[i]
	libsvm.svm_cross_validation_labeltargets(prob, fold_start_p,param, nf, target)

	acc = len([i for i in xrange(len(output)) if output[i] == target[i]])*1.0/prob.l
	del target
	del fold_start_p
	return acc
Developer: MD2Korg | Project: memphis-dataprocessingframework | Lines: 25 | Source: mygrid.py


Example 6: do_one_cv_classify

def do_one_cv_classify(theinput):
	c = theinput[0]
	gamma = theinput[1]
	nf = theinput[2]
	output = theinput[3]
	input = theinput[4]
	useprob = theinput[5]	
	perfmetric = theinput[6]

	param = svm.svm_parameter('-c %g -g %g -b %d' % (c,gamma,int(useprob)))

	prob = svm.svm_problem(output, input)
	target = (c_double * prob.l)()
	
	posclass = output[0]
	fold_start = (c_int *1)();
	fold_start[0] = -1;
	libsvm.svm_cross_validation(prob, fold_start, param, nf, target)
	ys = prob.y[:prob.l]
	db = array([[ys[i],target[i]] for i in range(prob.l)])
	
	del target
	
	neg = len([x for x in ys if x != posclass])
	pos = prob.l-neg
	
	
	
	[topacc,topphi,minfpfnratio,topf1,auc,optbias] = optimize_results(db,neg,pos,posclass,perfmetric)
		
	return topacc,topphi,minfpfnratio,topf1,auc,optbias
Developer: MD2Korg | Project: memphis-dataprocessingframework | Lines: 31 | Source: mygrid.py


Example 7: generate_model

  def generate_model(self, variant_name, models_folder):
    training_file = variant_name + ".t"
    if self.feature_scaling:
      self.scale_features(variant_name, models_folder)
      training_file += ".scale"
    (y, x) = svm_read_problem(training_file)
    self.m_prob = svm.svm_problem(y, x, self.m_params.kernel_type == PRECOMPUTED)

    libsvm_path = os.environ['LIBSVM_PATH']
    scaled_filename = os.path.abspath(training_file)
    cp = "python grid.py " + scaled_filename
    curdir = os.getcwd()
    os.chdir(libsvm_path + "/tools/")
    result = call_process(cp)
    os.chdir(curdir)
    C,g,rate = [float(l) for l in result.split("\n")[-2].split(" ")]

    print "C: %.8f, gamma: %.8f\n" % (C,g)

    self.m_params.C = C
    self.m_params.gamma = g

    print "\n-----------------------------"
    model = svm.svm_train(self.m_prob, self.m_params)
    print "-----------------------------\n"

    svm_save_model(models_folder + variant_name + ".model", model)
Developer: chubbymaggie | Project: nitro-1 | Lines: 27 | Source: svm.py
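
As a side note, newer LIBSVM releases expose the grid search from tools/grid.py as an importable find_parameters() helper, which avoids shelling out and parsing stdout as generate_model does above. A rough sketch, assuming that helper exists in your LIBSVM checkout (the tools path below is hypothetical):

# Rough sketch; assumes tools/grid.py from a recent LIBSVM provides find_parameters().
import sys
sys.path.append('/path/to/libsvm/tools')  # hypothetical location of LIBSVM's tools/
from grid import find_parameters

# Search the log2(C) / log2(gamma) grid; returns the best CV rate and parameters.
rate, best = find_parameters('train.scale', '-log2c -5,15,2 -log2g -15,3,2')
print('C=%g gamma=%g (CV accuracy %.2f%%)' % (best['c'], best['g'], rate))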


Example 8: __init__

 def __init__(self, data_dictionary, model_target, kernel=LINEAR, cv_segments=10, **args):
     #Create an SVM model object
 
     #Check to see if a threshold has been specified in the function's arguments
     try: self.threshold = args['threshold']
     except KeyError: self.threshold=2.3711   # if there is no 'threshold' key, then use the default (2.3711)
     
     #Store some object data
     model_dict = deepcopy(data_dictionary)
     self.model_target = model_target
     self.folds = cv_segments
            
     #Label the exceedances in the training set.
     model_dict[model_target] = self.Assign_Labels(model_dict[model_target])
     
     #Extract the training labels and training set
     self.training_labels = model_dict.pop(model_target)
     self.training_set = np.transpose(model_dict.values())
     self.headers = model_dict.keys()
             
     #Scale the covariates to [-1,1]
     self.Scale_Covariates()
     
     #Generate an SVM model.
     self.svm_problem = svm.svm_problem(self.training_labels, self.training_set)
     self.svm_params = {'kernel_type' : kernel, 'weight_label' : [0,1], 'weight' : [10,1]}
     self.model=svm.svm_model(self.svm_problem, svm.svm_parameter(**self.svm_params))
     
     #Use cross-validation to find the best number of components in the model.
     self.Select_Linear_Model(-5, 10)
     
     #Rebuild the model, calculating the probabilities of class membership
     self.svm_params['probability']=1
     self.model=svm.svm_model(self.svm_problem, svm.svm_parameter(**self.svm_params))
Developer: mnfienen | Project: beach_gui | Lines: 34 | Source: svm.py


Example 9: train

    def train(self, c, g, probability=True, compensation=True,
              path=None, filename=None, save=True):
        if filename is None:
            filename = os.path.splitext(self.getOption('strArffFileName'))[0]
            filename += '.model'
        if path is None:
            path = self.dctEnvPaths['data']
        param = svm.svm_parameter(kernel_type=svm.RBF,
                                  C=c, gamma=g,
                                  probability=1 if probability else 0)

        labels, samples = self.getData(normalize=True)

        # because we train the SVM with dict we need to redefine the zero-insert
        self.hasZeroInsert = False
        if not self.oClassifier is None:
            self.oClassifier.setOption('hasZeroInsert', True)

        if compensation:
            weight, weight_label = self._calculateCompensation(labels)
            param.weight = weight
            param.weight_label = weight_label
            param.nr_weight = len(weight)

        problem = svm.svm_problem(labels, samples)
        model = svm.svm_model(problem, param)
        if save:
            model.save(os.path.join(path, filename))
        return problem, model
Developer: cmci | Project: cecog | Lines: 29 | Source: learning.py
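
The weight, weight_label and nr_weight fields above belong to the old keyword-style svm_parameter. With the string-option interface, the same class-weight compensation can be expressed with -wLABEL flags; a small sketch, assuming svmutil-style bindings and purely illustrative data and weights:

# Sketch of class-weight compensation via string options; assumes svmutil bindings.
from svmutil import svm_problem, svm_parameter, svm_train

labels  = [1, 1, -1, -1, -1, -1]
samples = [{1: 0.9}, {1: 0.8}, {1: 0.1}, {1: 0.2}, {1: 0.15}, {1: 0.3}]

prob = svm_problem(labels, samples)
# -wLABEL W multiplies C for that class; here the rarer positive class is upweighted.
param = svm_parameter('-t 2 -c 1 -g 0.5 -w1 4 -w-1 1')
model = svm_train(prob, param)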


Example 10: iqr_model_train

def iqr_model_train(matrix_kernel_train, labels_train, idx2clipid,
                    svm_para = '-w1 50 -t 4 -b 1 -c 1'):
    """
    Light-weighted SVM learning module for online IQR

    @param matrix_kernel_train: n-by-n square numpy array with kernel values
        between training data
    @param labels_train: row-wise labels of training data (1 or True indicates
        positive, 0 or False otherwise)
    @param idx2clipid: idx2clipid(row_idx) returns the clipid for the 0-base row
        in matrix
    @param svm_para: (optional) SVM learning parameter

    @rtype: dictionary with 'clipids_SV': list of clipids for support vectors
    @return: output as a dictionary with 'clipids_SV'

    """
    log = logging.getLogger('iqr_model_train')

    # set training inputs
    matrix_kernel_train = np.vstack((np.arange(1, len(matrix_kernel_train)+1),
                                     matrix_kernel_train)).T
    log.debug("Done matrix_kernel_train")

    problem = svm.svm_problem(labels_train.tolist(), matrix_kernel_train.tolist(), isKernel=True)
    log.debug("Done problem")
    svm_param = svm.svm_parameter(svm_para)
    log.debug("Done svm_param")

    # train model
    model = svmutil.svm_train(problem, svm_param)
    log.debug("Done train model")

    # release memory
    del problem
    del svm_param
    log.debug("Done release memory")

    # check learning failure
    if model.l == 0:
        raise Exception('svm model learning failure')
    log.debug("Done checking learning failure (no failure)")

    n_SVs = model.l
    clipids_SVs = []
    idxs_train_SVs = svmtools.get_SV_idxs_nonlinear_svm(model)
    for i in range(n_SVs):
        _idx_1base = idxs_train_SVs[i]
        _idx_0base = _idx_1base - 1
        clipids_SVs.append(idx2clipid[_idx_0base])
        model.SV[i][0].value = i+1 # within SVM model, index needs to be 1-base
    log.debug("Done collecting support vector IDs")

    #svmutil.svm_save_model(filepath_model, model)

    output = dict()
    output['model'] = model
    output['clipids_SVs'] = clipids_SVs

    return output
Developer: brandontheis | Project: SMQTK | Lines: 60 | Source: iqr_modules.py


Example 11: trainSVM

def trainSVM(kernel, labels):
    #need to add an id number as the first column of the list
    svmKernel = column_stack((arange(1, len(kernel.tolist()) + 1), kernel))
    prob = svm_problem(labels.tolist(), svmKernel.tolist(), isKernel=True)
    param = svm_parameter('-t 4')   

    model = svm_train(prob, param)
    return model
Developer: Primer42 | Project: TuftComp136 | Lines: 8 | Source: main.py
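
For precomputed kernels (-t 4), LIBSVM expects every row to start with a 1-based serial number followed by K(i, 1..n), which is exactly what the column_stack call above supplies. A compact sketch of the same layout with a toy linear kernel, assuming svmutil-style bindings:

# Sketch of the precomputed-kernel row layout; assumes svmutil bindings.
import numpy as np
from svmutil import svm_problem, svm_parameter, svm_train

X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
labels = [1, -1, 1]

K = X.dot(X.T)                                  # toy n x n kernel matrix
ids = np.arange(1, len(K) + 1).reshape(-1, 1)   # mandatory 1-based index column
rows = np.hstack((ids, K)).tolist()

prob = svm_problem(labels, rows, isKernel=True)
model = svm_train(prob, svm_parameter('-t 4'))  # -t 4 selects the precomputed kernel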


Example 12: train

 def train(self, session, doc):
     # doc here is [[class,...], [{vector},...]]
     (labels, vectors) = doc.get_raw(session)
     problem = svm.svm_problem(labels, vectors)
     self.model = svm.svm_model(problem, self.param)
     modelPath = self.get_path(session, 'modelPath')
     self.model.save(str(modelPath))
     self.predicting = 1
Developer: ReinSi | Project: cheshire3 | Lines: 8 | Source: preParser.py


Example 13: train

 def train(self, dataset):
     """
     Trains the svm classifier. Converts words to real numbers for training
     as SVM expects only numbers.
     """
     super(SvmLearner, self).train(dataset)
     prob  = svm.svm_problem(self.results, self.observations)
     param = svm.svm_parameter(kernel_type=svm.LINEAR, C=10, probability=1)
     self.model = svm.svm_model(prob, param)
Developer: thoughtnirvana | Project: python_learners | Lines: 9 | Source: learners.py


Example 14: train

    def train(self,trainset):
        """
        Trains the SVM.
        """

        self.n_classes = len(trainset.metadata['targets'])

        # Set LIBSVM parameters
        kernel_types = {'linear':libsvm.LINEAR,'polynomial':libsvm.POLY,
                        'rbf':libsvm.RBF,'sigmoid':libsvm.SIGMOID}
        if self.kernel not in kernel_types:
            raise ValueError('Invalid kernel: '+self.kernel+'. Should be either \'linear\', \'polynomial\', \'rbf\' or \'sigmoid\'')

        if self.label_weights != None:
            class_to_id = trainset.metadata['class_to_id']
            nr_weight = self.n_classes
            weight_label = range(self.n_classes)
            weight = [1]*self.n_classes
            for k,v in self.label_weights.iteritems():
                weight[class_to_id[k]] = v
        else:
            nr_weight = 0
            weight_label = []
            weight = []

        libsvm_params = libsvm.svm_parameter(svm_type = libsvm.C_SVC,
                                             kernel_type = kernel_types[self.kernel],
                                             degree=self.degree,
                                             gamma=self.gamma,
                                             coef0=self.coef0,
                                             C=self.C,
                                             probability=int(self.output_probabilities),
                                             cache_size=self.cache_size,
                                             eps=self.tolerance,
                                             shrinking=int(self.shrinking),
                                             nr_weight = nr_weight,
                                             weight_label = weight_label,
                                             weight = weight)
        

        # Put training set in the appropriate format:
        #  if is sparse (i.e. a pair), inputs are converted to dictionaries
        #  if not, inputs are assumed to be sequences and are kept intact
        libsvm_inputs = []
        libsvm_targets = []
        for input,target in trainset:
            if type(input) == tuple:
                libsvm_inputs += [dict(zip(input[1],input[0]))]
            else:
                libsvm_inputs += [input]
            libsvm_targets += [float(target)] # LIBSVM requires double-valued targets

        libsvm_problem = libsvm.svm_problem(libsvm_targets,libsvm_inputs)

        # Train SVM
        self.svm = libsvm.svm_model(libsvm_problem,libsvm_params)
Developer: goelhardik | Project: projects | Lines: 56 | Source: classification.py
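
As the conversion loop above relies on, svm_problem accepts the feature vectors either as dense sequences or as sparse index-to-value dicts with 1-based indices. A small sketch, assuming svmutil-style bindings and toy data:

# Dense vs. sparse input formats accepted by svm_problem; assumes svmutil bindings.
from svmutil import svm_problem

labels = [1.0, -1.0]

dense  = [[0.0, 2.5, 1.0], [1.0, 0.0, 0.0]]  # plain feature vectors
sparse = [{2: 2.5, 3: 1.0}, {1: 1.0}]        # the same data with zeros omitted

prob_dense  = svm_problem(labels, dense)
prob_sparse = svm_problem(labels, sparse)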


Example 15: leave_one_out

def leave_one_out(y, x, param, n='DUMMY'):
    results = []
    for i, test in enumerate(zip(y, x)):
        training_y = y[:i] + y[i+1:]
        training_x = x[:i] + x[i+1:]
        problem = svm.svm_problem(training_y, training_x)
        model = svmutil.svm_train(problem, param, '-q')
        result = svmutil.svm_predict(y[i:i+1], x[i:i+1], model, '-b 1')
        results.append(result + (test[0], make_d.decode(x[i], make_d.decode_dic)))
    return results
Developer: cschu | Project: AgoSVM | Lines: 10 | Source: run_on_real_data.py


Example 16: iterGridSearchSVM

    def iterGridSearchSVM(self, c_info=None, g_info=None, fold=5,
                          probability=False, compensation=True):
        swap = lambda a,b: (b,a)
        if not c_info is None and len(c_info) >= 3:
            c_begin, c_end, c_step = c_info[:3]
        else:
            c_begin, c_end, c_step = -5,  15, 2
        if c_end < c_begin:
            c_begin, c_end = swap(c_begin, c_end)
        c_step = abs(c_step)

        if not g_info is None and len(g_info) >= 3:
            g_begin, g_end, g_step = g_info[:3]
        else:
            g_begin, g_end, g_step = -15, 3, 2
        if g_end < g_begin:
            g_begin, g_end = swap(g_begin, g_end)
        g_step = abs(g_step)

        labels, samples = self.getData(normalize=True)
        #print len(labels), len(samples)
        problem = svm.svm_problem(labels, samples)

        if compensation:
            weight, weight_label = self._calculateCompensation(labels)

        n = (c_end - c_begin) / c_step + 1
        n *= (g_end - g_begin) / g_step + 1

        l2c = c_begin
        while l2c <= c_end:
            l2g = g_begin
            while l2g <= g_end:

                param = svm.svm_parameter(kernel_type=svm.RBF,
                                          C=2.**l2c, gamma=2.**l2g,
                                          probability=1 if probability else 0)
                if compensation:
                    param.weight = weight
                    param.weight_label = weight_label
                    param.nr_weight = len(weight)

                predictions = svm.cross_validation(problem, param, fold)
                predictions = map(int, predictions)

                #print n,c,g
                conf = ConfusionMatrix.from_lists(labels, predictions,
                                                  self.l2nl)
                yield n,l2c,l2g,conf

                l2g += g_step
            l2c += c_step
Developer: cmci | Project: cecog | Lines: 52 | Source: learning.py


Example 17: learnModel

 def learnModel(self, train_y, train_X):
     # scale train data
     svmScaler = preprocessing.MinMaxScaler(feature_range = (-1, 1))
     train_X_scaledArr = svmScaler.fit_transform(train_X)
     
     # learn and save svm model
     X = train_X_scaledArr.tolist()   
     problem = svm_problem(train_y, X)
     paramStr = '-c ' + str(self._param_c) + ' -g ' + str(self._param_g) + ' -q'
     param = svm_parameter(paramStr)
     
     self._model = svm_train(problem, param)
     self._scaler = svmScaler
Developer: 1987hasit | Project: BoVW_Action | Lines: 13 | Source: svm_tool.py


Example 18: leave_one_out

def leave_one_out(y, x, param, n="DUMMY"):
    results = []
    for i, test in enumerate(zip(y, x)):
        training_y = y[:i] + y[i + 1 :]
        training_x = x[:i] + x[i + 1 :]
        problem = svm.svm_problem(training_y, training_x)
        # t0 = time.clock()
        model = svmutil.svm_train(problem, param, "-q")
        # t1 = time.clock()
        # print 'Training took', t1 - t0, 'seconds.'
        result = svmutil.svm_predict(y[i : i + 1], x[i : i + 1], model, "-b 1")
        results.append(result + (test[0], make_d.decode(x[i], make_d.decode_dic)))
    return results
Developer: cschu | Project: AgoSVM | Lines: 13 | Source: find_mature_in_precursor.py


Example 19: test

def test(word, documents):
    import svm,random
    docs = [d.copy() for d in documents if d[reverse_map[word]]]
    nondocs = [d.copy() for d in documents if not d[reverse_map[word]]]
    nondocs = random.sample(nondocs,min(5*len(docs),len(nondocs)))
    print float(len(nondocs))/(len(docs)+len(nondocs))
    cats = [1 for i in docs] + [0 for i in nondocs]
    obs = docs + nondocs
    for i in xrange(len(obs)):
        obs[i][reverse_map[word]] = 0.
    zobs = zip(obs,cats)
    random.shuffle(zobs)
    obs,cats = zip(*zobs)
    params = svm.svm_parameter(C=1, kernel_type=svm.LINEAR)
    problem = svm.svm_problem(cats,obs)
    target = svm.cross_validation(problem,params,20)
    return sum(target[i] == cats[i] for i in xrange(len(cats)))/float(len(cats))
Developer: alextp | Project: pylda | Lines: 17 | Source: sslda.py


Example 20: do_one_cv

def do_one_cv(theinput):
	nu = theinput[0]
	c = theinput[1]
	gamma = theinput[2]
	nf = theinput[3]
	output = theinput[4]
	input = theinput[5]
	bins = theinput[6]
	
	param = svm.svm_parameter('-s %d -t %d -n %g -c %g -g %g' % (svm.NU_SVR,svm.RBF,nu,c,gamma))

	prob = svm.svm_problem(output, input)
	target = (c_double * prob.l)()
	fold_start = (c_int *1)();
	fold_start[0] = -1;
	
	libsvm.svm_cross_validation_labeltargets(prob, fold_start,param, nf, target)	
	MSE,SCC = evaluations(prob.y[:prob.l],target[:prob.l],bins)
	del target
	return MSE,SCC
Developer: MD2Korg | Project: memphis-dataprocessingframework | Lines: 20 | Source: mygrid.py
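
Several of the examples above (1, 5, 6 and 20) call cross-validation entry points such as svm_cross_validation_sepsets and svm_cross_validation_labeltargets, which appear to come from a custom-patched libsvm build rather than the stock library. With an unpatched LIBSVM, n-fold cross-validation is normally requested by passing -v N to svm_train, which then returns the cross-validation score (accuracy for classification, mean squared error for regression) instead of a model. A sketch with toy regression data, assuming svmutil-style bindings:

# Stock n-fold cross-validation via the -v option; assumes svmutil bindings.
from svmutil import svm_problem, svm_parameter, svm_train

y = [1.2, 0.7, -0.3, 0.9, -1.1, 0.2]
x = [{1: v} for v in [1.0, 0.6, -0.4, 0.8, -1.0, 0.1]]

prob  = svm_problem(y, x)
param = svm_parameter('-s 4 -t 2 -n 0.5 -c 1 -g 0.5 -v 3')  # NU_SVR, RBF, 3-fold CV
mse = svm_train(prob, param)  # with -v set, returns the CV mean squared error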



Note: the svm.svm_problem examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not republish this article without permission.

