Python libfann.neural_net Function Code Examples


This article collects typical usage examples of the Python pyfann.libfann.neural_net function. If you are wondering what neural_net does, how to call it, or where to find real-world usage, the hand-picked code samples below should help.



Twenty code examples of the neural_net function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
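Before the examples, here is a minimal sketch of the typical lifecycle of a libfann.neural_net object, using only calls that appear in the examples below (create, configure, train, run, save). The topology, training parameters, and the FANN-format training file "xor.data" are illustrative assumptions, not taken from any particular example.

from pyfann import libfann

# Minimal sketch; assumes a FANN-format training file "xor.data" exists.
ann = libfann.neural_net()
ann.create_standard_array((2, 3, 1))              # 2 inputs, 3 hidden neurons, 1 output
ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC)
ann.set_learning_rate(0.7)
ann.train_on_file("xor.data", 1000, 100, 0.001)   # max epochs, epochs between reports, desired error
print(ann.run([1.0, 0.0]))                        # run the trained network on one input vector
ann.save("xor.net")                               # a saved net can be reloaded with create_from_file

Most of the examples below are variations on this pattern: they either build a new network (create_standard_array or create_sparse_array) and train it, or reload a saved network with create_from_file and call run on new inputs.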

Example 1: __init__

 def __init__(self, dim_state, dim_action, action_positve=False, hidden_layer_action='auto', hidden_layer_value='auto', gamma=0.3, sigma=1):
     if hidden_layer_action == 'auto':
         hidden_layer_action = ceil((dim_state + dim_action) / 2)
     if hidden_layer_value == 'auto':
         hidden_layer_value = ceil((dim_state + 1) / 2)
     self.gamma = gamma
     self.sigma = sigma
     self.ann = libfann.neural_net()
     self.ann.create_standard_array([dim_state, hidden_layer_action, dim_action])
     self.vnn = libfann.neural_net()
     self.vnn.create_standard_array([dim_state, hidden_layer_value, 1])
     self.dim_action = dim_action
Developer: DavoudTaghawiNejad, Project: mayhem, Lines: 12, Source: cacla.py


Example 2: create_net

def create_net(in_layers, hidden_layers, out_layers):
	net = fann.neural_net()
	net.create_sparse_array(1, (in_layers, hidden_layers, out_layers))
	net.set_activation_function_hidden(fann.SIGMOID_SYMMETRIC)
	net.set_activation_function_output(fann.LINEAR)

	return net
Developer: s-urbaniak, Project: studia, Lines: 7, Source: xor.py


Example 3: doTrain

    def doTrain(self, checkin):
        print 'doTrain'
        ann = libfann.neural_net()
        filename = self.netFileName()
        ann.create_from_file(filename)
        ann.train(checkin.get_inputs(), [checkin.checkin_points])

        # Render a visualization of the updated network with an external script.
        script = sys.path[0] + '/get_vis.py'
        process = subprocess.Popen(["python", script, filename], stdout=subprocess.PIPE)
        result = process.communicate()[0]
        self.visualization = result

        ann.save(filename)
        self.exists = True
        self.save()
Developer: fredtruman, Project: Squares-New-Model, Lines: 26, Source: models.py


Example 4: __setstate__

    def __setstate__(self, odict):
        ann = libfann.neural_net()
        fake_file_call_s2f( ann.create_from_file,
                            odict.pop('fann_save') )

        self.__dict__.update(odict)
        self.ann = ann
Developer: jmoudrik, Project: orange-hacks, Lines: 7, Source: fann_neural.py


Example 5: __init__

 def __init__(self, topology=(3, 250, 2), inputMomentum = 0.05, learning_rate = 0.01, connection_rate = 1):
     """
     Constr.
     @param topology: A vector of integers, specifying the number of neurons in each layer. Must not be None. Must have more than 1 element.
     @param inputMomentum: The training momentum. Must be in the interval [0,1).
     @param learning_rate: The learning rate. Must be in the interval [0,1).
     @param connection_rate: The FANN connection rate. Must be an integer greater or equal to 1.
     """
     
     assert topology is not None and len(topology) > 1, "Topology %s is invalid" % str(topology) 
     assert reduce(lambda x,y: x and y, map(lambda z : isinstance(z, int) and z > 0, topology)), "Topology %s contains invalid elements" % str(topology)
     assert inputMomentum is not None and 0 <= inputMomentum < 1, "Input momentum %s is invalid" % inputMomentum
     assert learning_rate is not None and 0 <= learning_rate < 1, "Learning rate %s is invalid" % learning_rate 
     assert connection_rate is not None and connection_rate >= 1, "Connection rate %s is invalid" % connection_rate 
     
     self.topology = topology
     self.momentum = inputMomentum
     self.learning_rate = learning_rate
     self.connection_rate = connection_rate
     
     self.ann = libfann.neural_net()
     self.ann.create_sparse_array(connection_rate, topology)
     
     self.ann.set_learning_rate(learning_rate)
     self.ann.set_learning_momentum(inputMomentum)
     self.ann.set_activation_function_hidden(libfann.SIGMOID)
     self.ann.set_activation_function_output(libfann.LINEAR)
     self.ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
     self.ann.randomize_weights(-0.1, 0.1)
Developer: nikolayg, Project: MCWeb, Lines: 29, Source: FANNWrapper.py


Example 6: __init__

 def __init__(self,
              name               = "Eve",
              generation         = 1,
              connection_rate    = 0.5,
              learning_rate      = 0.5,
              max_iterations     = 50,
              bornBefore         = 0,
              trainAlg           = libfann.FANN_TRAIN_RPROP,
              learning_momentum  = 0.0,
              neurons            = [],
              connectionType     = "Sparse"):
     settings.netsTried     += 1
     self.name               = name
     self.generation         = generation
     self.connection_rate    = connection_rate
     self.learning_rate      = learning_rate
     self.max_iterations     = max_iterations
     self.ann                = ""
     self.childrenHad        = 0
     self.bornBefore         = bornBefore
     self.trainAlg           = trainAlg
     self.learning_momentum  = learning_momentum
     self.mseHistory         = []
     self.testmseHistory     = []
     self.summedError        = 1.0
     self.neurons            = copy.deepcopy(neurons)
     if (self.neurons == []):
         self.neurons = [[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())],
                          [settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] , 
                         [[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())] 
                            for i in range(settings.num_output)]]
     self.foodcost    = (0.001*(len(self.neurons)-1)) + (0.0001*sum(map(len,self.neurons[0:-1])))
     self.connectionType = connectionType
     if self.ann =="":
         self.ann = libfann.neural_net()
Developer: Buggaboo, Project: Triathlon, Lines: 35, Source: Triathlon-Breeder.py


Example 7: execute

    def execute(self, possible_venues):
        print 'execute!'
        if not self.exists:
            return 'The net does not exist yet, stop trying to execute it'
        ann = libfann.neural_net()
        filename = self.netFileName()
        ann.create_from_file(filename)
        processed_venues = []

        # For tweaking the score scaling.
        maxScore = 0
        minScore = 100

        for v in possible_venues:
            # log.info(v)
            score = ann.run(common.get_inputs(v))[0]
            name = v['name']
            vid = v['id']
            # Also for tweaking the score scaling.
            if score > maxScore:
                maxScore = score
            if score < minScore:
                minScore = score
            processed_venues.append([name, score, vid])

        print "min, max:"
        print minScore
        print maxScore
        print maxScore - minScore
        return processed_venues
Developer: fredtruman, Project: Squares-New-Model, Lines: 30, Source: models.py


Example 8: trainNet

def trainNet():
    ann = libfann.neural_net()
    ann.create_sparse_array(connection_rate, (num_inputs, num_hidden, num_outputs))
    ann.set_learning_rate(learning_rate)
    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC)
    ann.train_on_file(train_file, max_iterations, iterations_between_reports, desired_error)
    ann.save(nn_file)
Developer: jeffames-cs, Project: nnot, Lines: 7, Source: ann.py


Example 9: main

def main():
    # setting the prediction parameters 
    known_days = 7
    predict_days = 1
    verify_days = 30

    # setting up the parameters of the network
    connection_rate = 1
    learning_rate = 0.1
    num_input = known_days * 2
    num_hidden = 60
    num_output = predict_days
    
    # setting up the parameters of the network, continued
    desired_error = 0.000040
    max_iterations = 10000
    iteration_between_reports = 100

    # setting up the network
    net = libfann.neural_net()
    net.create_sparse_array(connection_rate, (num_input, num_hidden, num_output))
    net.set_learning_rate(learning_rate)
    net.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)

    # read the input file and format data
    fin = open("cw3.in")
    lines = fin.readlines()
    fin.close()
    rawdata = list(map(float, lines))[-1000:]
    datain0 = rawdata[0::2]
    datain1 = rawdata[1::2]
    n0 = max(datain0) * 1.4
    n1 = max(datain1) * 1.4
    datain0 = list(map(lambda x: x / n0, datain0))
    datain1 = list(map(lambda x: x / n1, datain1))

    # train the network
    data = libfann.training_data()
    drange = range(len(datain0) - known_days - verify_days)
    data.set_train_data(
        map(lambda x: datain0[x:][:known_days] + datain1[x:][:known_days], drange),
        map(lambda x: datain0[x + known_days:][:predict_days], drange)
        )
    net.train_on_data(data, max_iterations, iteration_between_reports, desired_error)

    # 
    result = []
    for i in range(verify_days):
        dtest = datain0[-known_days - verify_days + i:][:known_days] + datain1[-known_days - verify_days + i:][:known_days]
        result += [net.run(dtest)[0] * n0]
    plot.plot(list(map(lambda x: x * n0, datain0[-verify_days: -verify_days])) + result, "r")
    plot.plot(map(lambda x: x * n0, datain0[-verify_days:]), "b")
    #plot.plot(list(map(lambda x: x * n0, datain0[-verify_days * 2: -verify_days])) + result, "r")
    #plot.plot(map(lambda x: x * n0, datain0[-verify_days * 2:]), "b")
    plot.show()

#    net.train_on_file("cw3.in", max_iterations, iteration_between_reports, desired_error)
    #print(net.run([1,1]))
    print("hehe")
    return
Developer: starrify, Project: CW2013, Lines: 60, Source: 2013AI_cw3.py


Example 10: main

def main():
    """
    Train a neural network to recognize whether a sentence is written in English
    or in French. It is based on the character frequencies in the sentence and
    tries to figure out a pattern from the training set.
    """
    if len(argv) != 3:
        stderr.write('Usage: python model.py <training_set file> <output file>\n')
        return 1
    ann = fann.neural_net()
    ann.create_sparse_array(CONNECTION_RATE, (NUM_INPUT, NUM_HIDDEN, NUM_OUTPUT))
    ann.set_learning_rate(LEARNING_RATE)
    ann.set_activation_function_output(fann.SIGMOID)
    ann.train_on_file(argv[1], MAX_ITERATIONS, ITERATION_REPORT, DESIRED_ERROR)
    while 1:
        print "Write your text to test your model:"
        text = stdin.readline()
        if len(text) <= 1:
            return 0
        o = np.array(ann.run(text_to_vector(text.lower())))
        predict = np.argmax(o)
        if predict == 0:
            print "%s: is written in french !\n" % (text.replace('\n', ''))
        else:
            print "%s: is written in english !\n" % (text.replace('\n', ''))
    ann.save(argv[2])
    return 0
Developer: cesumilo, Project: LangDetect, Lines: 27, Source: model.py


Example 11: train

    def train(self, inputs, outputs, params):
        self.p = inputs.shape[1]       #number of input features
        self.n_r = outputs.shape[1]    #size of output grid in rows
        self.n_c = outputs.shape[2]    #size of output grid in cols

        self.out_min = outputs.min()
        self.out_max = outputs.max()

        d = self.out_max - self.out_min
        self.out_min -= d / 98
        self.out_max -= d / 98

        outputs = (outputs - self.out_min) / (self.out_max - self.out_min)

        assert inputs.shape[0] == outputs.shape[0]

        nn = libfann.neural_net()
        #nn.create_standard_array((self.p, 50, 50, self.n_r*self.n_c))
        nn.create_shortcut_array((self.p, self.n_r*self.n_c))
        nn.set_learning_rate(.7)
        nn.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
        nn.set_activation_function_output(libfann.SIGMOID)

        data = libfann.training_data()
        data.set_train_data(inputs, outputs.reshape((-1, self.n_r*self.n_c)))

        #nn.train_on_data(data, 500, 10, .001)
        nn.cascadetrain_on_data(data, 15, 1, .001)

        nn.save('nn.net')
        nn.destroy()
Developer: bhumbers, Project: 745approx, Lines: 31, Source: neural_approx.py


Example 12: create_net

def create_net(layers, funcs):
	net = fann.neural_net()
	net.create_sparse_array(1, layers)
	net.set_activation_function_hidden(funcs[0])
	net.set_activation_function_output(funcs[1])

	return net
Developer: Verderey, Project: Classification_Attemption, Lines: 7, Source: demo_1.py


Example 13: test

    def test(self, ann_file, test_file):
        """Test an artificial neural network."""
        if not os.path.isfile(ann_file):
            raise IOError("Cannot open %s (no such file)" % ann_file)
        if not os.path.isfile(test_file):
            raise IOError("Cannot open %s (no such file)" % test_file)

        # Get the prefix for the classification columns.
        try:
            dependent_prefix = self.config.data.dependent_prefix
        except:
            dependent_prefix = OUTPUT_PREFIX

        self.ann = libfann.neural_net()
        self.ann.create_from_file(ann_file)

        self.test_data = TrainData()
        try:
            self.test_data.read_from_file(test_file, dependent_prefix)
        except IOError as e:
            logging.error("Failed to process the test data: %s" % e)
            exit(1)

        logging.info("Testing the neural network...")
        fann_test_data = libfann.training_data()
        fann_test_data.set_train_data(self.test_data.get_input(),
            self.test_data.get_output())

        self.ann.test_data(fann_test_data)

        mse = self.ann.get_MSE()
        logging.info("Mean Square Error on test data: %f" % mse)
Developer: xieyanfu, Project: nbclassify, Lines: 32, Source: training.py


Example 14: classify_image

    def classify_image(self, im_path, ann_path, config, codebookfile=None):
        """Classify an image file and return the codeword.

        Preprocess and extract features from the image `im_path` as defined
        in the configuration object `config`, and use the features as input
        for the neural network `ann_path` to obtain a codeword.
        If necessary the 'codebookfile' is used to create the codeword.
        """
        if not os.path.isfile(im_path):
            raise IOError("Cannot open %s (no such file)" % im_path)
        if not os.path.isfile(ann_path):
            raise IOError("Cannot open %s (no such file)" % ann_path)
        if 'preprocess' not in config:
            raise ConfigurationError("preprocess settings not set")
        if 'features' not in config:
            raise ConfigurationError("features settings not set")
        if codebookfile and not os.path.isfile(codebookfile):
            raise IOError("Cannot open %s (no such file)" % codebookfile)

        ann = libfann.neural_net()
        ann.create_from_file(str(ann_path))

        # Get the MD5 hash for the image.
        hasher = hashlib.md5()
        with open(im_path, 'rb') as fh:
            buf = fh.read()
            hasher.update(buf)

        # Get a hash that is unique for this image/preprocess/features
        # combination.
        hashables = get_config_hashables(config)
        hash_ = combined_hash(hasher.hexdigest(),
            config.features, *hashables)

        if hash_ in self.cache:
            phenotype = self.cache[hash_]
        else:
            phenotyper = Phenotyper()
            phenotyper.set_image(im_path)
            if self.roi:
                phenotyper.set_roi(self.roi)
            phenotyper.set_config(config)
            phenotype = phenotyper.make()

            # Cache the phenotypes, in case they are needed again.
            self.cache[hash_] = phenotype

            # Convert phenotype to BagOfWords-code if necessary.
            use_bow = getattr(config.features['surf'], 'bow_clusters', False)
            if use_bow:
                with open(codebookfile, "rb") as cb:
                    codebook = load(cb)
                phenotype = get_bowcode_from_surf_features(phenotype, codebook)

        logging.debug("Using ANN `%s`" % ann_path)
        codeword = ann.run(phenotype)

        return codeword
Developer: naturalis, Project: nbclassify, Lines: 58, Source: classify.py


Example 15: run_predictions

def run_predictions():

    import MySQLdb as mdb
    from pyfann import libfann
    #from datetime import date
    from network_functions import save_prediction

    mydate = ""

    con = None
    con = mdb.connect('localhost', 'root',
            'fil1202job', 'stock');

    with con:
        cur = con.cursor(mdb.cursors.DictCursor)
        cur1 = con.cursor()
        #
        # Get a list of all networks
        #
        cur.execute("SELECT a.id, a.group, b.ticker, b.predict_data, a.net_file FROM `network`.`network` a, network.net_group b where a.group = b.id;")
        rows = cur.fetchall()

        for row in rows:
            #
            # For each network get the training data - only most recent data at the moment
            #
            #seldate = "select latest_prediction from network.network where id = " + str(row["id"])
            #cur2.execute(seldate)
            #latestdate = cur2.fetchone()
            #latestdate1 = latestdate[0]

            #print latestdate1
            cur1.execute(row["predict_data"])
            for row1 in cur1.fetchall():
                #
                # Extract Date
                #
                mydate = row1[(len(row1) - 1)]
                row1b = list(row1)
                del row1b[(len(row1b) - 1)]
                #
                # Set up network
                #
                ann = libfann.neural_net()
                ann.create_from_file(row["net_file"])
                #
                # Run Prediction
                #
                print row1b
                print ann.run(row1b)
                prediction = ann.run(row1b)
                prediction = str(prediction).translate(None, '[]')
                #
                # Store results in db - Function
                #
                save_prediction(row["id"], mydate, prediction)

    calc_signals()
Developer: philmcc, Project: aistocks, Lines: 58, Source: network_functions.py


Example 16: network

def network(inputsize,h1,h2,h3,h4,h5,h6,outputsize):
    connect_rate = 0.1
    ann = libfann.neural_net()
    ann.create_sparse_array(connect_rate,
                            (inputsize,
                             h1,h2,h3,h4,h5,h6,
                             outputsize))
    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
    return ann
Developer: haskellpostgresprogrammer, Project: python_files, Lines: 9, Source: libfannstuff.py


Example 17: testNet

def testNet():
    data = libfann.training_data()
    data.read_train_from_file(test_file);

    ann = libfann.neural_net()
    ann.create_from_file(nn_file)

    ann.reset_MSE()
    ann.test_data(data)
    print("Mean square error: {0}".format(ann.get_MSE()));
Developer: jeffames-cs, Project: nnot, Lines: 10, Source: ann.py


Example 18: firstTrain

    def firstTrain(self, checkin):
        print 'firstTrain!'
        # train_data = libfann.training_data()
        ann = libfann.neural_net()
        # ann.create_standard(nLAYERS, nINPUTS, nHIDDEN1, nHIDDEN2, nOUTPUTS)
        ann.create_standard_array((nINPUTS, nHIDDEN1, nHIDDEN2, nOUTPUTS))
        # log.info(checkin.get_inputs())
        ann.train(checkin.get_inputs(), [checkin.checkin_points])
        filename = self.netFileName()
        ann.save(filename)
Developer: fredtruman, Project: Squares-New-Model, Lines: 10, Source: models.py


Example 19: train

def train(trainFile, layerNumber, neuronNumber, maxIteration):
    desiredError = 1e-2
    #maxIteration = 1000
    iterationBetweenReports = 100
    ann = libfann.neural_net()
    ann.create_standard_array(tuple(neuronNumber))
    ann.set_learning_rate(0.7)
    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
    ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC_STEPWISE)
    ann.train_on_file(trainFile, maxIteration, iterationBetweenReports, desiredError)
    ann.save(trainFile + ".net")
Developer: yangyanzhe, Project: ImageRetrival, Lines: 11, Source: imageRetrieval.py


Example 20: test

def test(annFile, testFile):
    ann = libfann.neural_net()
    ann.create_from_file(annFile)
    resultFile = testFile + "result"
    with open(testFile) as testFileReader, open(resultFile, "w+") as resultFileWriter:
        for line in testFileReader:
            if len(line.strip()) > 0:
                tempResult = ann.run([float(x) for x in line.split()])
                tempImageClass = tempResult.index(max(tempResult))
#                resultFileWriter.write(str(tempImageClass) + "\n")
                resultFileWriter.write(" ".join([str(x) for x in tempResult]) + " " + str(tempImageClass) + "\n")
Developer: yangyanzhe, Project: ImageRetrival, Lines: 11, Source: imageRetrieval.py



Note: The pyfann.libfann.neural_net examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.

