Python utilities.fListToString Function Code Examples


This article collects typical usage examples of the Python function pybrain.utilities.fListToString. If you are wondering what fListToString does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Below are 14 code examples of the fListToString function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
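
Before working through the examples, here is a minimal standalone sketch of calling fListToString directly. Judging from the calls below, the function renders a list of floats as a single string at a given precision; the exact bracketing and spacing of the result depends on the pybrain version, so the output in the comment is illustrative rather than exact.

# Minimal sketch: format a list of floats as a compact string.
from pybrain.utilities import fListToString

errors = [0.123456, 0.098765, 0.054321]
# The precise layout varies by pybrain version; expect each value rendered
# to 3 decimal places, e.g. roughly "[0.123, 0.099, 0.054]".
print(fListToString(errors, 3))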

Example 1: trainUntilConvergence

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25):
        """Train the module on the dataset until it converges.

        Return the module with the parameters that gave the minimal validation
        error.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. validationProportion is the ratio of the dataset
        that is used for the validation dataset.

        If maxEpochs is given, at most that many epochs
        are trained. Each time validation error hits a minimum, try for
        continueEpochs epochs to find a better one."""
        epochs = 0
        if dataset == None:
            dataset = self.ds
        if verbose == None:
            verbose = self.verbose
        # Split the dataset randomly: validationProportion of the samples for
        # validation.
        trainingData, validationData = (
            dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into training " +
                             "and validation sets with proportion " + str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        trainingErrors = []
        validationErrors = [bestverr]
        while True:
            trainingErrors.append(self.train())
            validationErrors.append(self.testOnData(validationData))
            if epochs == 0 or validationErrors[-1] < bestverr:
                # one update is always done
                bestverr = validationErrors[-1]
                bestweights = self.module.params.copy()

            if maxEpochs != None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1

            if len(validationErrors) >= continueEpochs * 2:
                # have the validation errors started going up again?
                # compare the average of the last few to the previous few
                old = validationErrors[-continueEpochs * 2:-continueEpochs]
                new = validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
        trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        if verbose:
            print 'train-errors:', fListToString(trainingErrors, 6)
            print 'valid-errors:', fListToString(validationErrors, 6)
        return trainingErrors, validationErrors
Developer: kaeufl, Project: pybrain, Lines: 58, Source: backprop.py
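
For context, here is a hedged sketch of how a trainUntilConvergence call like the one above is typically reached through PyBrain's BackpropTrainer. The tiny XOR-style dataset, the network shape, and maxEpochs=50 are illustrative assumptions, not part of the quoted example; with verbose=True the method prints its error lists through fListToString.

# Illustrative usage sketch (dataset, network size and maxEpochs are assumptions).
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 1)                      # 2 inputs, 1 target
for inp, tgt in [((0, 0), (0,)), ((0, 1), (1,)),
                 ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, tgt)

net = buildNetwork(2, 3, 1)                       # small feed-forward network
trainer = BackpropTrainer(net, ds, verbose=True)  # verbose output uses fListToString
trainErrors, valErrors = trainer.trainUntilConvergence(maxEpochs=50)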


Example 2: testSingleAction

 def testSingleAction(self):        
     r = self.runSequences(num_actions=1, r_states=map(array, [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]),
                           num_interactions=1000, lr=0.1, _lambda=0.5, gamma=0.5)
     if self.verbose:
         for x, l in r:
             print x
             for a in l:
                 print fListToString(a, 2)        
     for _, l in r:
         self.assertAlmostEquals(min(l[0]), max(l[0]), places=0) 
         self.assertAlmostEquals(min(l[1]), max(l[1]), places=0)
         self.assertAlmostEquals(min(l[2]), max(l[2]), places=0)             
         self.assertAlmostEquals(max(l[3]) - 1, min(l[3]), places=0) 
Developer: DanSGraham, Project: School-Projects, Lines: 13, Source: linearfa.py


Example 3: testSimple

 def testSimple(self):        
     r = self.runSequences(num_actions=3, num_features=5, num_states=4, num_interactions=2000,
                           lr=0.1, _lambda=0.5, gamma=0.5)        
     if self.verbose:
         for x, l in r:
             print x
             for a in l:
                 print fListToString(a[0], 2)        
     for _, l in r:
         self.assertAlmostEquals(min(l[0][0]), max(l[0][0]), places=0) 
         self.assertAlmostEquals(min(l[1][0]), max(l[1][0]), places=0)
         self.assertAlmostEquals(min(l[2][0]) + len(l[2][0]) - 1, max(l[2][0]), places=0)             
         self.assertAlmostEquals(min(l[3][0]), max(l[3][0]), places=0) 
Developer: DanSGraham, Project: School-Projects, Lines: 13, Source: linearfa.py


Example 4: _evaluateSequence

 def _evaluateSequence(self, f, seq, verbose = False):
     """Return the ponderated MSE over one sequence."""
     totalError = 0.
     ponderation = 0.
     for input, target in seq:
         res = f(input)
         e = 0.5 * sum((target-res).flatten()**2)
         totalError += e
         ponderation += len(target)
         if verbose:
             print((    'out:    ', fListToString( list( res ) )))
             print((    'correct:', fListToString( target )))
             print((    'error: % .8f' % e))
     return totalError, ponderation
Developer: Angeliqe, Project: pybrain, Lines: 14, Source: supervised.py
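
The error term in Example 4 is half the summed squared difference between target and network output. A quick numeric check of that formula, using made-up vectors (values are illustrative only):

# Numeric check of the 0.5 * sum((target - res)**2) error term from Example 4.
from numpy import array

target = array([1.0, 0.0])   # illustrative target vector
res = array([0.8, 0.2])      # illustrative network output
e = 0.5 * sum((target - res).flatten() ** 2)
print(e)                     # ~0.04 == 0.5 * (0.2**2 + 0.2**2), up to float rounding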


Example 5: _evaluateSequence

 def _evaluateSequence(self, f, seq, verbose = False):
     """ return the importance-ponderated MSE over one sequence. """
     totalError = 0
     ponderation = 0.
     for input, target, importance in seq:
         res = f(input)
         e = 0.5 * dot(importance.flatten(), ((target-res).flatten()**2))
         totalError += e
         ponderation += sum(importance)
         if verbose:
             print     'out:       ', fListToString(list(res))
             print     'correct:   ', fListToString(target)
             print     'importance:', fListToString(importance)
             print     'error: % .8f' % e
     return totalError, ponderation
Developer: ZachPhillipsGary, Project: CS200-NLP-ANNsProject, Lines: 15, Source: importance.py


Example 6: testSingleStateFullDiscounted

 def testSingleStateFullDiscounted(self):
     r = self.runSequences(num_actions=4, num_features=3, num_states=1, num_interactions=500,
                           gamma=0, lr=0.25)
     if self.verbose:
         for x, l in r:
             print x
             for a in l:
                 print fListToString(a[0], 2)        
     for _, l in r:        
         self.assertAlmostEquals(min(l[0][0]), 1, places=0) 
         self.assertAlmostEquals(max(l[0][0]), 1, places=0) 
         self.assertAlmostEquals(2 * min(l[1][0]), 1, places=0) 
         self.assertAlmostEquals(2 * max(l[1][0]), 1, places=0) 
         self.assertAlmostEquals(min(l[2][0]), 0, places=0) 
         self.assertAlmostEquals(max(l[2][0]), len(l[2][0]) - 1, places=0) 
         self.assertAlmostEquals(min(l[3][0]), max(l[3][0]), places=0)                 
Developer: DanSGraham, Project: School-Projects, Lines: 16, Source: linearfa.py


Example 7: _oneGeneration

    def _oneGeneration(self):
        self.oldPops.append(self.pop)
        self.generation += 1
        fitnesses = self._evaluatePopulation()
        # store best in hall of fame
        besti = argmax(array(fitnesses))
        best = self.pop[besti]
        bestFits = sorted(fitnesses)[::-1][:self._numSelected()]
        self.hallOfFame.append(best)
        self.hallOfFitnesses.append(bestFits)

        if self.verbose:
            print 'Generation', self.generation
            print '        relat. fits:', fListToString(sorted(fitnesses), 4)
            if len(best.params) < 20:
                print '        best params:', fListToString(best.params, 4)

        self.pop = self._selectAndReproduce(self.pop, fitnesses)
Developer: DanSGraham, Project: code, Lines: 18, Source: coevolution.py


Example 8: trainUntilConvergence

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25):
        epochs = 0
        if dataset == None:
            dataset = self.ds
        if verbose == None:
            verbose = self.verbose
        trainingData, validationData = (
            dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into training " +
                             "and validation sets with proportion " + str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        trainingErrors = []
        validationErrors = [bestverr]
        while True:
            trainingErrors.append(self.train())
            validationErrors.append(self.testOnData(validationData))
            if epochs == 0 or validationErrors[-1] < bestverr:
                bestverr = validationErrors[-1]
                bestweights = self.module.params.copy()

            if maxEpochs != None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1

            if len(validationErrors) >= continueEpochs * 2:
                old = validationErrors[-continueEpochs * 2:-continueEpochs]
                new = validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
        trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        if verbose:
            print 'train-errors:', fListToString(trainingErrors, 6)
            print 'valid-errors:', fListToString(validationErrors, 6)
        return trainingErrors, validationErrors
Developer: LuckyMagpie, Project: Cocktail, Lines: 41, Source: backprop.py


Example 9: _updateShaping

 def _updateShaping(self):
     """ Daan: "This won't work. I like it!"  """
     assert self.numberOfCenters == 1
     possible = self.shapingFunction.getPossibleParameters(self.windowSize)
     matchValues = []
     pdfs = [multivariateNormalPdf(s, self.mus[0], self.sigmas[0])
             for s in self.samples]
     
     for p in possible:
         self.shapingFunction.setParameter(p)
         transformedFitnesses = self.shapingFunction(self.fitnesses)
         #transformedFitnesses /= sum(transformedFitnesses)
         sumValue = sum([x * log(y) for x, y in zip(pdfs, transformedFitnesses) if y > 0])
         normalization = sum([x * y for x, y in zip(pdfs, transformedFitnesses) if y > 0])
         matchValues.append(sumValue / normalization)
         
     
     self.shapingFunction.setParameter(possible[argmax(matchValues)])
     
     if len(self.allsamples) % 100 == 0:
         print possible[argmax(matchValues)]
         print fListToString(matchValues, 3)
Developer: ZachPhillipsGary, Project: CS200-NLP-ANNsProject, Lines: 22, Source: fem.py


Example 10: trainUntilConvergence

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25,
                              trainingData=None, validationData=None,
                              convergence_threshold=10):
        """Train the module on the dataset until it converges.
        Return the module with the parameters that gave the minimal validation
        error.
        If no dataset is given, the dataset passed during Trainer
        initialization is used. validationProportion is the ratio of the dataset
        that is used for the validation dataset.

        If the training and validation data is already set, the splitPropotion is ignored
        If maxEpochs is given, at most that many epochs
        are trained. Each time validation error hits a minimum, try for
        continueEpochs epochs to find a better one."""
        epochs = 0
        if dataset is None:
            dataset = self.ds
        if verbose is None:
            verbose = self.verbose
        if trainingData is None or validationData is None:
            # Split the dataset randomly: validationProportion of the samples for
            # validation.
            trainingData, validationData = (
                dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into training " +
                             "and validation sets with proportion " + str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        bestepoch = 0
        self.trainingErrors = []
        self.validationErrors = [bestverr]
        while True:
            trainingError = self.train()
            validationError = self.testOnData(validationData)
            if isnan(trainingError) or isnan(validationError):
                raise Exception("Training produced NaN results")
            self.trainingErrors.append(trainingError)
            self.validationErrors.append(validationError)
            if epochs == 0 or self.validationErrors[-1] < bestverr:
                # one update is always done
                bestverr = self.validationErrors[-1]
                bestweights = self.module.params.copy()
                bestepoch = epochs

            if maxEpochs != None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1

            if len(self.validationErrors) >= continueEpochs * 2:
                # have the validation errors started going up again?
                # compare the average of the last few to the previous few
                old = self.validationErrors[-continueEpochs * 2:-continueEpochs]
                new = self.validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
                lastnew = round(new[-1], convergence_threshold)
                if sum(round(y, convergence_threshold) - lastnew for y in new) == 0:
                    self.module.params[:] = bestweights
                    break
        #self.trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        if verbose:
            print(('train-errors:', fListToString(self.trainingErrors, 6)))
            print(('valid-errors:', fListToString(self.validationErrors, 6)))
        return self.trainingErrors[:bestepoch], self.validationErrors[:1 + bestepoch]
Developer: lbvienna, Project: compare_documents, Lines: 70, Source: ExtendedBackprop.py
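
The distinguishing part of Example 10 is its extra stopping rule: training also stops once the last continueEpochs validation errors are identical after rounding to convergence_threshold decimal places. A standalone sketch of just that check, with made-up error values:

# Standalone sketch of the rounding-based convergence test from Example 10.
convergence_threshold = 10
new = [0.03141592653501, 0.03141592653502, 0.03141592653499]  # made-up recent errors
lastnew = round(new[-1], convergence_threshold)
converged = sum(round(y, convergence_threshold) - lastnew for y in new) == 0
print(converged)  # True: all three agree once rounded to 10 decimal places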


Example 11: BalanceTask


# any episodic task
task = BalanceTask()

# any neural network controller
net = buildNetwork(task.outdim, 1, task.indim)

# any optimization algorithm to be plugged in, for example:
# learner = CMAES(storeAllEvaluations = True)
# or:
learner = HillClimber(storeAllEvaluations = True)

# in a non-optimization case the agent would be a LearningAgent:
# agent = LearningAgent(net, ENAC())
# here it is an OptimizationAgent:
agent = OptimizationAgent(net, learner)

# the agent and task are linked in an Experiment
# and everything else happens under the hood.
exp = EpisodicExperiment(task, agent)
exp.doEpisodes(100)

print('Episodes learned from:', len(learner._allEvaluations))
n, fit = learner._bestFound()
print('Best fitness found:', fit)
print('with this network:')
print(n)
print('containing these parameters:')
print(fListToString(n.params, 4))
Developer: Angeliqe, Project: pybrain, Lines: 28, Source: optimizers_for_rl.py


Example 12: CompetitiveCoevolution

    from pybrain.utilities import fListToString
    # TODO: convert to unittest
    C = CompetitiveCoevolution(None, [1, 2, 3, 4, 5, 6, 7, 8], populationSize=4)
    def b(x, y):
        C.allResults[(x, y)] = [1, 1, 1, []]
        C.allResults[(y, x)] = [-1, 1, -1, []]
        if x not in C.allOpponents:
            C.allOpponents[x] = []
        if y not in C.allOpponents:
            C.allOpponents[y] = []
        C.allOpponents[x].append(y)
        C.allOpponents[y].append(x)

    b(1, 6)
    b(1, 7)
    b(8, 1)
    b(5, 2)
    b(6, 2)
    b(8, 2)
    b(3, 5)
    b(3, 6)
    b(3, 7)
    b(4, 5)
    b(4, 7)
    b(8, 4)
    print(C.pop)
    print(C.parasitePop)
    print('          ', fListToString(C._competitiveSharedFitness(C.pop, C.parasitePop), 2))
    print('should be:', fListToString([0.83, 0.00, 1.33, 0.83], 2))

Developer: Boblogic07, Project: pybrain, Lines: 29, Source: competitivecoevolution.py


Example 13: writeDoubles

 def writeDoubles(self, node, l, precision = 6):
     self.addTextNode(node, fListToString(l, precision)[2:-1])
Developer: Boblogic07, Project: pybrain, Lines: 2, Source: handling.py


Example 14: BalanceTask


# any episodic task
task = BalanceTask()

# any neural network controller
net = buildNetwork(task.outdim, 1, task.indim)

# any optimization algorithm to be plugged in, for example:
# learner = CMAES(storeAllEvaluations = True)
# or:
learner = HillClimber(storeAllEvaluations = True)

# in a non-optimization case the agent would be a LearningAgent:
# agent = LearningAgent(net, ENAC())
# here it is an OptimizationAgent:
agent = OptimizationAgent(net, learner)

# the agent and task are linked in an Experiment
# and everything else happens under the hood.
exp = EpisodicExperiment(task, agent)
exp.doEpisodes(100)

print 'Episodes learned from:', len(learner._allEvaluations)
n, fit = learner._bestFound()
print 'Best fitness found:', fit
print 'with this network:'
print n
print 'containing these parameters:'
print fListToString(n.params, 4)
Developer: Boblogic07, Project: pybrain, Lines: 28, Source: optimizers_for_rl.py



Note: the pybrain.utilities.fListToString examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the License of the corresponding project. Please do not reproduce without permission.

