
Python networkreader.NetworkReader class: code examples


This article collects typical usage examples of the Python class pybrain.tools.customxml.networkreader.NetworkReader. If you are wondering what the NetworkReader class does, how to use it, or what real-world usage looks like, the curated class examples below may help.



Below are 20 code examples of the NetworkReader class, sorted by popularity by default. You can upvote the examples you find useful; these ratings help recommend better Python code examples.
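Before the project-specific examples, here is a minimal, self-contained sketch of the round trip they all rely on: build a network, serialize it to XML with NetworkWriter, and restore it with NetworkReader.readFrom. The file name demo_net.xml and the layer sizes are arbitrary illustration choices, not taken from any of the projects below.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

# Build a small feed-forward network: 2 inputs, 3 hidden units, 1 output.
net = buildNetwork(2, 3, 1, bias=True)

# Serialize the network topology and weights to an XML file.
NetworkWriter.writeToFile(net, 'demo_net.xml')

# Restore an equivalent network from the XML dump.
restored = NetworkReader.readFrom('demo_net.xml')

# The restored network produces the same output for the same input.
sample = [0.5, -0.25]
print(net.activate(sample))
print(restored.activate(sample))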

Example 1: runThirdStageClassifier

    def runThirdStageClassifier(self):
        out = []
        true = []
        #SingleBatIDToAdd = [1, 2, 3, 5, 6] # for single
        Correct = 0
        print "Loading Network.."
        net = NetworkReader.readFrom("C:\Users\Anoch\PycharmProjects\BatClassification\ThirdStageClassifier.xml")
        print "Loading feature data with SSC = 1 (Single call type)"
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target, path = self.getDistrubedTestDataRUNVERSIONTSC()
        SAMPLE_SIZE = len(minFreq)
        for i in range(0, SAMPLE_SIZE):
            ClassifierOutput= net.activate([minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i]])

            ClassifierOutputID = np.argmax(ClassifierOutput)
            currentTarget = self.convertIDSingleTSC(target[i])
            out.append(ClassifierOutputID)
            true.append(currentTarget)

            #MAPPING FROM BATID TO TSC value:
            TSC_value = ClassifierOutputID
            # Metadata Setup, get path and write: TSC = value
            ds = self.HDFFile[path[i]]
            ds.attrs["TSC"] = TSC_value
        self.HDFFile.flush()
        self.ConfusionMatrix =  self.CorrectRatio(out, true)
        return self.ConfusionMatrix
Author: AnochjhnIruthayam, Project: BatClassification, Lines: 26, Source: ClassifierConnected.py


Example 2: __init__

    def __init__(self, loadWeightsFromFile, filename):
        #neural network as function approximator
        #Initialize neural network
        if loadWeightsFromFile:
            self.nn = NetworkReader.readFrom(filename)
        else:
            self.nn = buildNetwork(NODE_INPUT, NODE_HIDDEN, NODE_OUTPUT, bias = True)
Author: DiNAi, Project: nn2014-RL-atari, Lines: 7, Source: agent.py


Example 3: buildNet

    def buildNet(self):
        print "Building a network..."
        if os.path.isfile(self.path):
            self.trained = True
            return NetworkReader.readFrom(self.path)
        else:
            return buildNetwork(self.all_data.indim, self.d[self.path]['hidden_dim'], self.all_data.outdim, outclass=SoftmaxLayer)
Author: davidlavy88, Project: FaceIdentifier, Lines: 7, Source: identify.py


Example 4: runClassifier

 def runClassifier(self):
     out = []
     true = []
     #BatIDToAdd = [1, 2, 3, 5, 6, 10, 11, 12, 14, 8, 9] #1-14 are bats; 8 is noise; 9 is something else
     print "Loading Network.."
     net = NetworkReader.readFrom("SecondStageClassifier.xml")
     print "Loading feature data with FSC = 1 (Bat calls)"
     minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target, path = self.getDistrubedTestDataRUNVERSION()
     SAMPLE_SIZE = len(minFreq)
     for i in range(0, SAMPLE_SIZE):
         ClassifierOutput = net.activate([minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i]])
         ClassifierOutputID = np.argmax(ClassifierOutput)
         currentTarget = self.convertIDMultiSingle(target[i])
         out.append(ClassifierOutputID)
         true.append(currentTarget)
         #MAPPING FROM BATID TO TSC value:
         SSC_value = ClassifierOutputID
         # Metadata Setup, get path and write: TSC = value
         ds = self.HDFFile[path[i]]
         ds.attrs["SSC"] = SSC_value
     # Close HDF5 file to save to disk. This is also done to make sure the next stage classifier can open the file
     self.HDFFile.flush()
     self.HDFFile.close()
     self.ConfusionMatrix = self.CorrectRatio(out, true)
     return self.ConfusionMatrix
Author: AnochjhnIruthayam, Project: BatClassification, Lines: 25, Source: ClassifierSecondStage.py


Example 5: getPersistedData

 def getPersistedData(self, name):
     pathToData = self.relPathFromFilename(name)
     if os.path.isfile(pathToData):
         with open(pathToData, "rb") as f:
             data = pickle.load(f)
         if name == NEURAL_NET_DUMP_NAME:
             data.net = NetworkReader.readFrom(self.relPathFromFilename(name + DATA_DUMP_NN_EXT))
         return data
Author: TanaySinghal, Project: SPCSAISelfDrivingCar, Lines: 8, Source: learning.py


Example 6: testNets

def testNets():
    ds = SupervisedDataSet.loadFromFile('SynapsemonPie/boards')
    net20 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer20.xml') 
    net50 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer50.xml') 
    net80 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer80.xml') 
    net110 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer110.xml') 
    net140 = NetworkReader.readFrom('SynapsemonPie/synapsemon_primer140.xml') 
    trainer20 = BackpropTrainer(net20, ds)
    trainer50 = BackpropTrainer(net50, ds)
    trainer80 = BackpropTrainer(net80, ds)
    trainer110 = BackpropTrainer(net110, ds)
    trainer140 = BackpropTrainer(net140, ds)
    print trainer20.train()
    print trainer50.train()
    print trainer80.train()
    print trainer110.train()
    print trainer140.train()
Author: johnny-zheng, Project: SynapsemonPy, Lines: 17, Source: primer_evaluation.py


Example 7: main

def main():
    train_file = 'data/train.csv'
    # validation_file = 'data/validation.csv'
    output_model_file = 'model.xml'

    # hidden_size = 4
    epochs = 500

    # load data
    # def loadData():
    train = np.loadtxt(train_file, delimiter=' ')
    Input = train[0:,0:3]
    Output = train[0:,3:5]

    # validation = np.loadtxt(validation_file, delimiter=',')
    # train = np.vstack((train, validation))

    # x_train = train[:, 0:-1]
    # y_train = train[:, -1]
    # y_train = y_train.reshape(-1, 1)

    # input_size = x_train.shape[1]
    # target_size = y_train.shape[1]

    # prepare dataset
    # def prepare dataset(input_size, target_size):
    ds = SDS(Input,Output)
    # ds.addSample(input_size)
    # ds.setField('input', x_train)
    # ds.setField('target', y_train)

    # init and train
    # def initTrain(input_size, hidden_size, input, output):
    # net = buildNetwork(input_size, hidden_size, target_size, bias=True)
    net = buildNetwork(3,  # input layer
                                 4,  # hidden0
                                 2,  # output
                                 hiddenclass=SigmoidLayer,
                                 outclass=SigmoidLayer,
                                 bias=True
                                 )
    net = NetworkReader.readFrom('model.xml')
    for i,o in zip(Input,Output):
        ds.addSample(i,o)
        print i, o

    trainer = BackpropTrainer(net, ds)
        
    print "training for {} epochs...".format(epochs)

    for i in range(epochs):
        mse = trainer.train()
        rmse = sqrt(mse)
        print "training RMSE, epoch {}: {}".format(i + 1, rmse)
        if os.path.isfile("../stopfile.txt") == True:
            break
    
    NetworkWriter.writeToFile(net, output_model_file)
Author: amaneureka, Project: iResQ, Lines: 58, Source: train.py


Example 8: __init__

 def __init__(self):
     print "start a new instance"
     self.loaded=False
     self.has_data_source=False
     try:
         self.net=NetworkReader.readFrom('pickled_ANN')
         print "ANN has been found from an ash jar"
         self.loaded=True
     except IOError:
         print "ash jar is empty, use train() to start a new ANN"
Author: lkong, Project: Pickle_ANN, Lines: 10, Source: NetFlow_ANN.py


Example 9: nfq_action_value

def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
    # TODO generalize away from 9 action values. Ask the network how many
    # discrete action values there are.
    n_actions = 9
    network = NetworkReader.readFrom(network_fname)
    actionvalues = np.empty(n_actions)
    for i_action in range(n_actions):
        network_input = r_[state, one_to_n(i_action, n_actions)]
        actionvalues[i_action] = network.activate(network_input)
    return actionvalues
Author: chrisdembia, Project: agent-bicycle, Lines: 10, Source: analysis.py


Example 10: exoplanet_search

 def exoplanet_search(self,
                      find=default_find):
      """
      This method searches for exoplanets.
      The output will have the format:
          (exostar1_streak, exostar2_streak, ...)
      where an exostar is a star with an exoplanet, and a streak is
      a list of states in which the exostar was observed to have exoplanetary
      behaviour.
      At least 5 stars must be tracked.
      """
      stars, deleted = self.find_objects(find=find)
      print str(deleted / len(self.photos)) + "% of the data was ignored"
      """
      There must be an integer multiple of 5 stars
      in stars, and the stars must be grouped together in lumps
      of 5.
      """
      exostreaks = []
      net = NetworkReader.readFrom("../../Identifier/network.xml")
      for starnum in range(0, len(stars), 5):
          search_stars = stars[starnum: starnum + 5]
          start_time = search_stars[0].states[0].time
          stop_time = search_stars[0].states[-1].time
          for photonum in range(start_time, stop_time + 1, 10):
              print self.photos[photonum]
              photonum = min(photonum, stop_time - 10)
              intensities = []
              for slide in range(photonum, photonum + 10):
                  intensities.append([])
                  photo = self.photos[slide]
                  photo.load()
                  for star in search_stars:
                      state = star.track(slide)
                      brightness = photo.intensity(state.position, state.radius)
                      intensities[-1].append(brightness)
                  photo.close()
              inpt = []
              for starothernum in range(5):
                  lightcurve = []
                  for slides_from_zero in range(10):
                      lightcurve.append(intensities[slides_from_zero][starothernum])
                  array_version = array(lightcurve)
                  array_version /= average(array_version)
                  inpt += list(array_version)
              nnet_output = net.activate(tuple(inpt))
              for o in range(5):
                  if nnet_output[o] > 0.5:
                      exostreak = []
                      for slide in range(photonum, photonum + 10):
                          state = search_stars[o].track(slide)
                          exostreak.append(state)
                      exostreaks.append(exostreak)
      return exostreaks
Author: Bushwallyta271828, Project: StarTracker, Lines: 54, Source: extract.py


Example 11: load_network_from_file

    def load_network_from_file(self, filename):
        """Load Network from File

        Using a NetworkWriter written file, data from the saved network
        will be reconstituted into a new PathPlanningNetwork class.
        This is used to load saved networks.

        Arguments:
            filename: The filename of the saved xml file.
        """
        self._network = NetworkReader.readFrom(filename)

        return
Author: evansneath, Project: surgicalsim, Lines: 13, Source: network.py


Example 12: __init__

 def __init__(self, data, machineID, eta, lmda, netPath, input_size=30, epochs=20, train_str_index=1000, train_end_index=3000):
     '''
     Constructor
     '''
     self.data = data
     self.machineID = machineID
     self.eta = eta
     self.lmda = lmda
     self.INPUT_SIZE = input_size
     self.epochs = epochs
     self.str_train = train_str_index
     self.end_train = train_end_index
     self.net = NetworkReader.readFrom(netPath)
Author: Manrich121, Project: ForecastingCloud, Lines: 13, Source: Rnn_model.py


Example 13: trainNetwork

def trainNetwork():
	print "[Training] Network has Started..."
	inputSize = 0
	with open('file1.txt', 'r') as f:			#automatically closes file at the end of the block
  		#first_line = f.readline()
  		#inputSize = len(first_line)
		dataset = SupervisedDataSet(4, 1)	 #specify size of data and target
		f.seek(0) 							#Move back to beginnning of file
		#iterate through the file. 1 picture per line
		for line in f:
			mylist = json.loads(line)		#list object
			target = mylist[-1]				#retrieve and then delete the target classification
			del mylist[-2:]
			#print target
			dataset.addSample(tuple(mylist), (target,))
			#print json.loads(line)
	if os.path.isfile('annModel.xml'):
		skynet = NetworkReader.readFrom('annModel.xml')#for use if individual sample files used
	else:
		skynet = buildNetwork(dataset.indim, 8, dataset.outdim, bias=True, hiddenclass=TanhLayer) #input,hidden,output
	#SoftmaxLayer, SigmoidLayer, LinearLayer, GaussianLayer
	#Note hidden neuron number is arbitrary, can try 1 or 4 or 3 or 5 if this methods doesnt work out
	trainer = BackpropTrainer(skynet, dataset,learningrate = 0.3, weightdecay = 0.01,momentum = 0.9)
	#trainer.trainUntilConvergence()
	for i in xrange(1000):
		trainer.train()
    #trainer.trainEpochs(1000)
    #Save the now trained neural network
	NetworkWriter.writeToFile(skynet,'annModel.xml')
	print "[Network] has been Written"

################## SVM Method #######################
#Change append method in write method for target persistence
	dataX = []
	datay = []
	with open(writeFile, 'r') as f:
		for line in f:
			mylist = json.loads(line)
			target2 = mylist[-1]
			dataX.append(mylist[:-2])
			datay.append(target2)
	#datay = [target2] * len(dataX) #Targets, size is n_samples, for use with indiviual sample files with same target
	print [target2]
	print dataX
	print datay
	clf = svm.LinearSVC()
	clf.fit(dataX,datay)
    #Persist the trained model
	joblib.dump(clf,'svmModel.pkl')
Author: phalax4, Project: illumination, Lines: 49, Source: writeUnit.py


Example 14: __init__

 def __init__(self, data, machineID, netPath, eta, lmda, input_size=30, epochs=20, train_str_index=1000, train_end_index=3000):
     '''
     Constructor
     '''
     self.cpuData = data[0]
     self.memData = data[1]
     self.machineID = machineID
     self.eta = eta
     self.lmda = lmda
     self.INPUT_SIZE = input_size
     self.epochs = epochs
     self.str_train = train_str_index
     self.end_train = train_end_index
     self.net = NetworkReader.readFrom(netPath)
     
     self.memForecasts = np.genfromtxt("d:/data/memory_fnn/"+machineID.replace("cpu", "memory"),delimiter=',').ravel()
Author: Manrich121, Project: ForecastingCloud, Lines: 16, Source: Entwine_model.py


Example 15: LoadNetwork

    def LoadNetwork(self):
        """
        Loading network dump from file.
        """
        FCLogger.debug('Loading network from PyBrain xml-formatted file...')
        net = None

        if os.path.exists(self.networkFile):
            net = NetworkReader.readFrom(self.networkFile)

            FCLogger.info('Network loaded from dump-file: {}'.format(os.path.abspath(self.networkFile)))

        else:
            FCLogger.warning('{} - file with Neural Network configuration not exist!'.format(os.path.abspath(self.networkFile)))

        self.network = net
Author: chrinide, Project: FuzzyClassificator, Lines: 16, Source: PyBrainLearning.py


Example 16: xforecast

 def xforecast(self):
     net = NetworkReader.readFrom('xtrainedinfo.xml')
     activate_in = []
     with open('xtraindata.csv') as tf:
         xforecast = []
         for line in tf:
             data = [x for x in line.strip().split(',') if x]
             for i in range(1, 10):
                 activate_in.append(float(data[i]))
             # print activate_in
             if float(net.activate((activate_in))) > 4.84e-06:
                 xforecast.append(2)
             elif float(net.activate((activate_in))) > 3.5e-06:
                 xforecast.append(1)
             else:
                 xforecast.append(0)
             activate_in = []
     return xforecast
Author: casyazmon, Project: mars_city, Lines: 18, Source: xplot.py


Example 17: perceptron

def perceptron(hidden_neurons=20, weightdecay=0.01, momentum=0.1):
    INPUT_FEATURES = 200
    CLASSES = 15
    HIDDEN_NEURONS = hidden_neurons
    WEIGHTDECAY = weightdecay
    MOMENTUM = momentum
    
    g = generate_data()
    alldata = g['d']
    testdata = generate_Testdata(g['index'])['d']
    #tstdata, trndata = alldata.splitWithProportion(0.25)
    #print type(tstdata)

    trndata = _convert_supervised_to_classification(alldata,CLASSES)
    tstdata = _convert_supervised_to_classification(testdata,CLASSES)
    trndata._convertToOneOfMany()  
    tstdata._convertToOneOfMany()
    #fnn = buildNetwork(trndata.indim, HIDDEN_NEURONS, trndata.outdim,outclass=SoftmaxLayer)
    fnn = NetworkReader.readFrom('GCM(200+70.87).xml')
    trainer = BackpropTrainer(fnn, dataset=trndata, momentum=MOMENTUM,verbose=True, weightdecay=WEIGHTDECAY,learningrate=0.01)
    result = 0;
    ssss = 0;
    for i in range(1):
        #trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
        out = fnn.activateOnDataset(tstdata)
        ssss = out
        out = out.argmax(axis=1)
        result = out
    df = pd.DataFrame(ssss)
    df.to_excel("GCMout.xls")
    df = pd.DataFrame(result)
    df.insert(1,'1',tstdata['class'])
    df.to_excel("GCM.xls")
    error = 0;
    for i in range(len(tstdata['class'])):
        if tstdata['class'][i] != result[i]:
            error = error+1
    #print (len(tstdata['class'])-error)*1.0/len(tstdata['class'])*100
    print AAC(result,tstdata['class'])
    print AUC(np.transpose(tstdata['class'])[0],result.transpose())
    print Fscore(np.transpose(tstdata['class'])[0],result.transpose())
    NetworkWriter.writeToFile(fnn, 'GCM.xml')
Author: Guosmilesmile, Project: pythonstudy, Lines: 44, Source: GCMrf.py


Example 18: improve_network

def improve_network(trainer=default_trainer, transit=default_transit):
    """
    Author: Xander
    This function improves an existing neural net
    capable of detecting exoplanets in lightcurves.
    It writes the network to network.xml
    The input, output pairs should be of the 
    format generate() generates them in.
    A good rule-of-thumb for telling whether the network detects an exoplanet
    is to see if the output is above 0.5.
    """
    print "Retreiving network..."
    net = NetworkReader.readFrom("../network.xml")
    print "Retreiving current performance..."
    f = open("../network_info.txt")
    first_line = f.readlines()[0]
    best_fraction = float(first_line.split("%")[0])
    f.close()
    train_network(net, best_fraction, trainer=trainer, transit=transit)
Author: Bushwallyta271828, Project: ClassifierNet, Lines: 19, Source: classifier.py


Example 19: __init__

    def __init__(self,df=0.9):
        self.inputSize = 80
        self.hiddenSize = 100
        self.outputSize = 1
        self.df = df

        if (os.path.isfile("nn/neural-network.xml")):
            ##print("Loading Network from file")
            self.net = NetworkReader.readFrom('nn/neural-network.xml')
            self.ds = SupervisedDataSet(self.inputSize, self.outputSize)
            self.loadDataSet()
            self.trainer = BackpropTrainer(self.net, self.ds)
        else:
            print("Building Network")
            self.net = buildNetwork(self.inputSize,self.hiddenSize,self.outputSize, bias=True)
            self.ds = SupervisedDataSet(self.inputSize, self.outputSize)
            self.loadDataSet()
            self.trainer = BackpropTrainer(self.net, self.ds)
            self.train()
            self.saveNet()
Author: vascobailao, Project: PYTHON, Lines: 20, Source: pyBrainNN.py


Example 20: runFirstStageClassifier

 def runFirstStageClassifier(self):
     out = []
     true = []
     BatIDToAdd = [1, 2, 3, 5, 6, 10, 11, 12, 14, 8, 9] #1-14 are bats; 8 is noise; 9 is something else
     print "Loading Network.."
     net = NetworkReader.readFrom("C:\Users\Anoch\PycharmProjects\BatClassification\FirstStageClassifier.xml")
     print "Loading feature data..."
     minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target, path = self.getDistrubedTestDataRUNVERSIONFSC(BatIDToAdd)
     SAMPLE_SIZE = len(minFreq)
     for i in range(0, SAMPLE_SIZE):
         ClassifierOutput = net.activate([minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i]])
         ClassifierOutputID = np.argmax(ClassifierOutput)
         currentTarget = self.convertIDFSC(target[i])
         out.append(ClassifierOutputID)
         true.append(currentTarget)
         #MAPPING FROM BATID TO TSC value:
         FSC_value = ClassifierOutputID
         # Metadata Setup, get path and write: TSC = value
         ds = self.HDFFile[path[i]]
         ds.attrs["FSC"] = FSC_value
         ds.attrs["SSC"] = -1
         ds.attrs["TSC"] = -1
     # Close HDF5 file to save to disk. This is also done to make sure the next stage classifier can open the file
     self.HDFFile.flush()
Author: AnochjhnIruthayam, Project: BatClassification, Lines: 24, Source: ClassifierConnected.py
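
Several of the examples above (for instance Examples 3, 8, 13, and 19) share the same load-or-train pattern: read the network back from an XML dump if one exists, otherwise build and train a new network and write it out for the next run. A minimal consolidated sketch of that pattern follows; the file name net_dump.xml, the toy XOR dataset, and the training parameters are illustrative assumptions rather than values from any project above.

import os

from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

NET_FILE = 'net_dump.xml'

# Toy XOR dataset: 2 inputs, 1 target per sample.
ds = SupervisedDataSet(2, 1)
for inp, tgt in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, tgt)

if os.path.isfile(NET_FILE):
    # Reuse the previously trained and saved network.
    net = NetworkReader.readFrom(NET_FILE)
else:
    # Build, train, and persist a new network.
    net = buildNetwork(ds.indim, 4, ds.outdim, bias=True)
    trainer = BackpropTrainer(net, ds, learningrate=0.1, momentum=0.9)
    for _ in range(200):
        trainer.train()
    NetworkWriter.writeToFile(net, NET_FILE)

print(net.activate((0, 1)))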



Note: the pybrain.tools.customxml.networkreader.NetworkReader class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before using or redistributing the code, and do not reproduce without permission.

