
Python classifiers.Classifier Class Code Examples


This article collects typical usage examples of the Python class weka.classifiers.Classifier. If you are wondering what the Classifier class does, how to use it, or are simply looking for working examples, the hand-picked snippets below should help.



The following 20 code examples of the Classifier class are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
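Note that most of the snippets below are excerpts and omit their imports and JVM handling; python-weka-wrapper(3) requires the JVM to be started once per process before any Weka class is used. As a rough, minimal sketch of the common setup (the dataset path is a placeholder you would replace with your own ARFF file; the J48 options shown are just the Weka defaults):

import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier

jvm.start()  # start the JVM once per process (pass packages=True if Weka packages are needed)
try:
    # load an ARFF file and declare the last attribute as the class
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("/path/to/dataset.arff")  # placeholder path
    data.class_is_last()

    # build and print a J48 decision tree
    cls = Classifier(classname="weka.classifiers.trees.J48", options=["-C", "0.25", "-M", "2"])
    cls.build_classifier(data)
    print(cls)

    # classify the first instance of the training data
    print(cls.classify_instance(data.get_instance(0)))
finally:
    jvm.stop()  # shut the JVM down when done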

Example 1: main

def main(args):
    """
    Trains a NaiveBayesUpdateable classifier incrementally on a dataset. The dataset can be supplied as parameter.
    :param args: the commandline arguments
    :type args: list
    """

    # load a dataset
    if len(args) <= 1:
        data_file = helper.get_data_dir() + os.sep + "vote.arff"
    else:
        data_file = args[1]
    helper.print_info("Loading dataset: " + data_file)
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(data_file, incremental=True)
    data.class_is_last()

    # classifier
    nb = Classifier(classname="weka.classifiers.bayes.NaiveBayesUpdateable")
    nb.build_classifier(data)

    # train incrementally
    for inst in loader:
        nb.update_classifier(inst)

    print(nb)
Developer: fracpete, Project: python-weka-wrapper3-examples, Lines of code: 26, Source file: incremental_classifier.py


Example 2: main

def main(args):
    """
    Loads a dataset, shuffles it, splits it into train/test set. Trains J48 with training set and
    evaluates the built model on the test set.
    :param args: the commandline arguments (optional, can be dataset filename)
    :type args: list
    """

    # load a dataset
    if len(args) <= 1:
        data_file = helper.get_data_dir() + os.sep + "vote.arff"
    else:
        data_file = args[1]
    helper.print_info("Loading dataset: " + data_file)
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(data_file)
    data.class_is_last()

    # generate train/test split of randomized data
    train, test = data.train_test_split(66.0, Random(1))

    # build classifier
    cls = Classifier(classname="weka.classifiers.trees.J48")
    cls.build_classifier(train)
    print(cls)

    # evaluate
    evl = Evaluation(train)
    evl.test_model(cls, test)
    print(evl.summary())
Developer: fracpete, Project: python-weka-wrapper3-examples, Lines of code: 30, Source file: train_test_split.py


Example 3: main

def main(args):
    """
    Trains a J48 classifier on a training set and outputs the predicted class and class distribution alongside the
    actual class from a test set. Class attribute is assumed to be the last attribute.
    :param args: the commandline arguments (train and test datasets)
    :type args: list
    """

    # load a dataset
    helper.print_info("Loading train: " + args[1])
    loader = Loader(classname="weka.core.converters.ArffLoader")
    train = loader.load_file(args[1])
    train.class_index = train.num_attributes - 1
    helper.print_info("Loading test: " + args[2])
    test = loader.load_file(args[2])
    test.class_is_last()

    # classifier
    cls = Classifier(classname="weka.classifiers.trees.J48")
    cls.build_classifier(train)

    # output predictions
    print("# - actual - predicted - error - distribution")
    for index, inst in enumerate(test):
        pred = cls.classify_instance(inst)
        dist = cls.distribution_for_instance(inst)
        print(
            "%d - %s - %s - %s  - %s" %
            (index+1,
             inst.get_string_value(inst.class_index),
             inst.class_attribute.value(int(pred)),
             "yes" if pred != inst.get_value(inst.class_index) else "no",
             str(dist.tolist())))
Developer: fracpete, Project: python-weka-wrapper3-examples, Lines of code: 33, Source file: output_class_distribution.py


Example 4: main

def main():
    """
    Just runs some example code.
    """

    # load a dataset
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()

    # train classifier
    classifier = Classifier("weka.classifiers.trees.J48")
    classifier.build_classifier(iris_data)

    # save and read object
    helper.print_title("I/O: single object")
    outfile = tempfile.gettempdir() + os.sep + "j48.model"
    serialization.write(outfile, classifier)
    model = Classifier(jobject=serialization.read(outfile))
    print(model)

    # save classifier and dataset header (multiple objects)
    helper.print_title("I/O: single object")
    serialization.write_all(outfile, [classifier, Instances.template_instances(iris_data)])
    objects = serialization.read_all(outfile)
    for i, obj in enumerate(objects):
        helper.print_info("Object #" + str(i+1) + ":")
        if javabridge.get_env().is_instance_of(obj, javabridge.get_env().find_class("weka/core/Instances")):
            obj = Instances(jobject=obj)
        elif javabridge.get_env().is_instance_of(obj, javabridge.get_env().find_class("weka/classifiers/Classifier")):
            obj = Classifier(jobject=obj)
        print(obj)
Developer: fracpete, Project: python-weka-wrapper3-examples, Lines of code: 34, Source file: serialization.py


Example 5: python_weka

class python_weka(object):
    def __init__(self, input_x, input_y, labels):
        self.input_x = input_x
        self.input_y = input_y
        self.labels = labels

    def write_arff(self, filename, relation, train_or_predict, input_x, input_y=None):
        f = open(filename, "w")
        f.write("@relation " + relation + "\n")
        for i in self.labels:
            train_or_predict += 1
            if train_or_predict == len(self.labels):
                break
            f.write("@attribute " + i + " " + self.labels[i] + "\n")
        f.write("\n")
        f.write("@data" + "\n")
        for i in range(len(input_x)):
            for j in input_x[i]:
                f.write(str(j) + "  ")
            if train_or_predict == 0:
                f.write(str(input_y[i]))
            else:
                f.write(str(0))
            f.write("\n")
        f.close()

    def train(self):
        filename = "train.arff"
        self.write_arff(filename, "train", 0, self.input_x, self.input_y)
        loader = Loader(classname="weka.core.converters.ArffLoader")
        data = loader.load_file(filename)
        data.class_is_last()
        self.cls = Classifier(classname="weka.classifiers.meta.Bagging", options=["-S", "5"])
        self.cls.build_classifier(data)
        os.remove(filename)

    def predict(self, test_data):
        filename = "test.arff"
        self.write_arff(filename, "test", 0, test_data)
        loader = Loader(classname="weka.core.converters.ArffLoader")
        data = loader.load_file(filename)
        data.class_is_last()
        # evl = Evaluation(data)
        # evl.evaluate_model(self.cls,data)
        # data.set_class_label(data.numAttributes() - 1)
        # data.setClassIndex(data.numAttributes() - 1)
        result = []
        for index, inst in enumerate(data):
            pred = self.cls.classify_instance(inst)
            dist = self.cls.distribution_for_instance(inst)
            result.append(dist[0])
            # print(str(index+1) + ": label index=" + str(pred) + ", class distribution=" + str(dist))
            # print str(index+1) + 'dist:'+ str(dist)
        os.remove(filename)
        return result
Developer: joinstu12, Project: text_summarization, Lines of code: 55, Source file: python_weka.py


Example 6: setOptions

 def setOptions(self, options):
     """
     Parses a given list of options.
      
     Parameter(s):
     
         'options' -- the list of options as an array of strings
     """
     
     Classifier.setOptions(self, options)
     
     return
Developer: duyvk, Project: weka-spmf, Lines of code: 12, Source file: JeroR.py


Example 7: assign_classify

def assign_classify(file_location, output="classified.out", model="naivebayes.model"):
    data = read_csv_file(file_location)
    jvm.start()
    # load clusters
    obj = serialization.read(model)
    classifier = Classifier(jobject=obj)
    # create file with cluster group
    with open(output, 'w') as cluster_file:
        for index, attrs in enumerate(data):
            inst = Instance.create_instance(attrs[1:])
            pred = classifier.classify_instance(inst)
            print(str(index + 1) + ": label index=" + str(pred))
            cluster_file.write(str(index + 1) + ": label index=" + str(pred) + "\n")
    jvm.stop()
Developer: whaleforever, Project: carilogo, Lines of code: 13, Source file: classify.py


Example 8: predict

def predict(attributes):
    jvm.start()
    file_path = print_to_file(attributes)
    # load the saved model
    objects = serialization.read_all("/Users/hosyvietanh/Desktop/data_mining/trained_model.model")
    classifier = Classifier(jobject=objects[0])
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(file_path)
    data.class_is_last()
    prediction = None
    for index, inst in enumerate(data):
        pred = classifier.classify_instance(inst)
        dist = classifier.distribution_for_instance(inst)
        prediction = int(pred)
        break  # only the first instance is needed
    jvm.stop()  # shut the JVM down before returning
    return prediction
Developer: davidnd, Project: data-mining, Lines of code: 14, Source file: weka_app.py


Example 9: playback_speed_checker

def playback_speed_checker(inputFile, dirRef):
    
    TRAINING_ARFF = 'dataset_playback.arff'
    inputRef = ""

    # Start JVM once; only the first jvm.start() per process takes effect,
    # so the classpath, packages and heap options are combined in one call
    jvm.start(system_cp=True, packages=True, max_heap_size="512m")
    
    # Find reference file
    for file in os.listdir(dirRef):
        if str(file).find(str(os.path.basename(inputFile))) != -1:
            inputRef = os.path.join(dirRef, file)
            break

    # Calculation distance
    (result, distance) = dtw_checker(inputFile, inputRef)

    # Loading data
    loader = Loader(classname="weka.core.converters.ArffLoader")    
    data = loader.load_file(TRAINING_ARFF)
    data.class_is_last()                    # set class attribute

    # Train the classifier
    #cls = Classifier(classname="weka.classifiers.functions.SMO")
    cls = Classifier(classname="weka.classifiers.trees.J48", options = ["-C", "0.3", "-M", "10"])
    cls.build_classifier(data)

    # Classify instance
    speed_instance = Instance.create_instance(numpy.ndarray(distance), classname='weka.core.DenseInstance', weight=1.0)
    speed_instance.dataset = data
    
    # Classify instance
    speed_flag = cls.classify_instance(speed_instance)
    
    if distance == 0:
        speed_class = 'nominal'
    elif speed_flag == 0:
        speed_class = 'down_speed'
    else:
        speed_class = 'up_speed'
        
#    print os.path.basename(inputFile) + ' --- ' + speed_class
    
    # Stop JVM
    jvm.stop()    

    print "SPEED IS: " + speed_class

    return speed_class
Developer: ignasi42, Project: defect_detector, Lines of code: 50, Source file: playback_speed_checker_final.py


Example 10: getCapabilities

    def getCapabilities(self):
        """
        returns the capabilities of this classifier
        
        Return:
        
            the capabilities of this classifier
        """

        result = Classifier.getCapabilities(self)
    
        # attributes
        result.enable(Capability.NOMINAL_ATTRIBUTES)
        result.enable(Capability.NUMERIC_ATTRIBUTES)
        result.enable(Capability.DATE_ATTRIBUTES)
        result.enable(Capability.STRING_ATTRIBUTES)
        result.enable(Capability.RELATIONAL_ATTRIBUTES)
        result.enable(Capability.MISSING_VALUES)
    
        # class
        result.enable(Capability.NOMINAL_CLASS)
        result.enable(Capability.NUMERIC_CLASS)
        result.enable(Capability.DATE_CLASS)
        result.enable(Capability.MISSING_CLASS_VALUES)
    
        # instances
        result.setMinimumNumberInstances(0)
        
        return result
Developer: duyvk, Project: weka-spmf, Lines of code: 29, Source file: JeroR.py


Example 11: main

def main():
    """
    Just runs some example code.
    """

    # load a dataset
    bodyfat_file = helper.get_data_dir() + os.sep + "bodyfat.arff"
    helper.print_info("Loading dataset: " + bodyfat_file)
    loader = Loader("weka.core.converters.ArffLoader")
    bodyfat_data = loader.load_file(bodyfat_file)
    bodyfat_data.class_is_last()

    # build and output the M5P model
    helper.print_title("Build M5P model")
    classifier = Classifier(classname="weka.classifiers.trees.M5P")
    classifier.build_classifier(bodyfat_data)
    print(classifier)
Developer: fracpete, Project: python-weka-wrapper3-examples, Lines of code: 17, Source file: mp5.py


Example 12: train_model

 def train_model(self, training_data):
     model_weka = None
     if os.path.isfile(self.model_file):
         print('Model ' + self.name + ' already trained.')
     else:
         print('Starting to train model ' + self.name + '.')
         model_weka = Classifier(classname=self.classname, options=self.options)
         model_weka.build_classifier(data=training_data)
         serialization.write(filename=self.model_file, jobject=model_weka)
         print('Model ' + self.name + ' trained and saved.')
     if os.path.isfile(self.parameter_file):
         print('Parameters of the model ' + self.name + ' already saved.')
     else:
         if model_weka is None:
             model_weka = Classifier(jobject=serialization.read(self.model_file))
         save_file(file_name=self.parameter_file, content=str(model_weka))
         print('Parameters of the model ' + self.name + ' saved.')
Developer: jonmagal, Project: recsys_challenge, Lines of code: 18, Source file: model.py


Example 13: train_JRip

	def train_JRip(self, min_per_rule=20, optimizations=2, folds=3, seed=42):
		params = [
			'-F', str(folds), # folds
			'-N', str(min_per_rule), # min elements per rule
			'-O', str(optimizations), # optimizations
			'-S', str(seed) #seed
		] 
		self.base_classifier = Classifier(classname='weka.classifiers.rules.JRip', options=params)
		self._train()
Developer: sbiastoch, Project: thesis, Lines of code: 9, Source file: evaluate.py


Example 14: train_J48

	def train_J48(self, min_per_rule=20):
		params = [
			'-C','0.3',
			'-M',str(min_per_rule),
	#		'-N',str(folds),
	#		'-R',
		]
		self.base_classifier = Classifier(classname='weka.classifiers.trees.J48', options=params)
		self._train()
Developer: sbiastoch, Project: thesis, Lines of code: 9, Source file: evaluate.py


Example 15: train

 def train(self):
     filename = "train.arff"
     self.write_arff(filename, "train", 0, self.input_x, self.input_y)
     loader = Loader(classname="weka.core.converters.ArffLoader")
     data = loader.load_file(filename)
     data.class_is_last()
     self.cls = Classifier(classname="weka.classifiers.meta.Bagging", options=["-S", "5"])
     self.cls.build_classifier(data)
     os.remove(filename)
Developer: joinstu12, Project: text_summarization, Lines of code: 9, Source file: python_weka.py


Example 16: getOptions

 def getOptions(self):
     """
     Gets the current settings of the Classifier as string array.
      
     Return:
      
         an array of strings suitable for passing to setOptions
     """
      
     return Classifier.getOptions(self)
Developer: duyvk, Project: weka-spmf, Lines of code: 10, Source file: JeroR.py


Example 17: listOptions

 def listOptions(self):
     """
     Returns an enumeration describing the available options.
     
     Return:
      
         an enumeration of all the available options.
     """
      
     return Classifier.listOptions(self)
Developer: duyvk, Project: weka-spmf, Lines of code: 10, Source file: JeroR.py


Example 18: trainData

 def trainData(self, arrfFile=None, classname="weka.classifiers.trees.J48", options=["-C", "0.3"]):
     if arrfFile is not None:
         self.initData(arrfFile)

     if self.data is None:
         return

     print('Building classifier ' + str(classname) + ' ' + ' '.join(options))
     self.classifier = Classifier(classname=classname, options=options)
     self.classifier.build_classifier(self.data)
Developer: fernandovieiraf02, Project: superpixel, Lines of code: 10, Source file: wekaWrapper.py


Example 19: riaa_checker

def riaa_checker(inputFile):
    
    TRAINING_ARFF = r'C:\Users\ASUS\Desktop\IGNASI\SMC\Workspace\dataset_riaa.arff'  # raw string so the backslashes are not escapes

    # Start JVM once; only the first jvm.start() per process takes effect,
    # so the classpath, packages and heap options are combined in one call
    jvm.start(system_cp=True, packages=True, max_heap_size="512m")

    # Calculation of bark bands information
    (absolute_bark, relative_bark, bark_ratios) = compute_bark_spectrum(inputFile)

    # Loading data
    loader = Loader(classname="weka.core.converters.ArffLoader")    
    data = loader.load_file(TRAINING_ARFF)
    data.class_is_last()                    # set class attribute

    # Train the classifier
    cls = Classifier(classname="weka.classifiers.functions.SMO")
    #cls = Classifier(classname="weka.classifiers.trees.J48", options = ["-C", "0.3", "-M", "10"])
    cls.build_classifier(data)

    # Classify instance
    bark_instance = Instance.create_instance(bark_ratios, classname='weka.core.DenseInstance', weight=1.0)
    bark_instance.dataset = data
    
    # Classify instance
    riaa_flag = cls.classify_instance(bark_instance)
    
    if riaa_flag == 0:
        riaa_class = 'riaa_ok'
    else:
        riaa_class = 'riaa_ko'
        
#    print os.path.basename(inputFile) + ' --- ' + riaa_class
    
    # Stop JVM
    jvm.stop()   

    print "RIAA FILTERING?: " + riaa_class

    return riaa_class
Developer: ignasi42, Project: defect_detector, Lines of code: 42, Source file: riaa_checker_final.py


Example 20: getDecisionTree

 def getDecisionTree(self, inputPath):   
     #load arff  
     data = self.load_Arff(inputPath)  
         
     # classifier: set the class attribute and build J48
     data.set_class_index(data.num_attributes() - 1)   # set class attribute
     classifier = Classifier(classname="weka.classifiers.trees.J48", options=["-C", "0.3"])
     classifier.build_classifier(data)

     classifierStr = str(classifier)
     for index in range(0,data.num_instances()):
         instance = data.get_instance(index)
         #print instance
         result = classifier.distribution_for_instance(instance)
         
         #print result
     graph = classifier.graph()
     return graph
Developer: zhaohengyang, Project: Android-malware-detection, Lines of code: 21, Source file: weka_interface.py



Note: The weka.classifiers.Classifier class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.

