
Python core.VGDLParser Class Code Examples


This article compiles typical usage examples of the vgdl.core.VGDLParser class in Python. If you are wondering what the VGDLParser class is for, how to use it, or what it looks like in real code, the curated examples below should help.



A total of 20 code examples of the VGDLParser class are shown below, sorted by popularity by default.
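Before the individual examples, here is a minimal sketch of the pattern that nearly all of them share: parse a VGDL game description string with VGDLParser().parseGame(...), then instantiate a level with buildLevel(...). This is only an orientation sketch, assuming the py-vgdl package and its bundled example mazes are importable; polarmaze_game and maze_level_1 are the same names used in the examples below.

def minimal_usage():
    # Minimal usage sketch: assumes py-vgdl and its examples package are on the import path.
    from vgdl.core import VGDLParser
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(polarmaze_game)  # parse the VGDL game description string
    g.buildLevel(maze_level_1)                  # build the level from its ASCII map
    return g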

Example 1: test6

def test6():
    """ Now with memory!"""
    from numpy import ndarray
    from examples.gridphysics.mazes import polarmaze_game
    from pybrain.optimization import SNES
    g = VGDLParser().parseGame(polarmaze_game)
    g.buildLevel(cheese_maze)
    game_env = GameEnvironment(g)
    net = buildNet(game_env.outdim, 10, 4, temperature=0.1, recurrent=True)
    
    algo = SNES(lambda x: someEpisodes(game_env, x, avgOver=6, maxSteps=30, exploretoo=False), net, verbose=True, desiredEvaluation=0.85)
    print algo.batchSize
    rows, cols = 2,3
    episodesPerStep = 5
    for i in range(rows*cols):
        pylab.subplot(rows, cols, i+1)
        algo.learn(episodesPerStep)
        if isinstance(algo.bestEvaluable, ndarray):
            net._setParameters(algo.bestEvaluable)
        else:
            net = algo.bestEvaluable
        plotBackground(game_env)    
        plotTrajectories(game_env, net)
        pylab.title(str((i+1)*episodesPerStep))
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
        print
    pylab.show()
Developer: chongdashu, Project: py-vgdl, Lines of code: 28, Source: nomodel_pomdp.py


Example 2: plotLSPIValues

def plotLSPIValues(gametype, layout, discountFactor=0.9, useTD=False, showValue=False):
    # build the game
    g = VGDLParser().parseGame(gametype)
    g.buildLevel(layout)
    
    # transform into an MDP and the mapping to observations
    C = MDPconverter(g)
    Ts, R, fMap = C.convert()    
    
    # find the the best least-squares approximation to the policy,
    # given only observations, not the state information
    if useTD:
        # state-based
        _, Tlspi = LSTD_PI_policy(fMap, Ts, R, discountFactor=discountFactor)
    else:
        # state-action-based
        _, Tlspi = LSPI_policy(fMap, Ts, R, discountFactor=discountFactor)
    
    # evaluate the policy
    Vlspi = trueValues(Tlspi, R, discountFactor=discountFactor)
        
    # plot those values    
    featurePlot((g.width, g.height), C.states, Vlspi)
    
    if showValue:
        # expected discounted reward at initial state
        Vinit = Vlspi[C.initIndex()]
        pylab.xlabel("V0=%.4f"%Vinit)
Developer: chongdashu, Project: py-vgdl, Lines of code: 28, Source: model_pomdp.py


Example 3: testAugmented

def testAugmented():
    from vgdl.core import VGDLParser
    from pybrain.rl.experiments.episodic import EpisodicExperiment
    from vgdl.mdpmap import MDPconverter
    from vgdl.agents import PolicyDrivenAgent
    
    
    zelda_level2 = """
wwwwwwwwwwwww
wA wwk1ww   w
ww  ww    1 w
ww     wwww+w
wwwww1ww  www
wwwww  0  Gww
wwwwwwwwwwwww
"""

    
    from examples.gridphysics.mazes.rigidzelda import rigidzelda_game
    g = VGDLParser().parseGame(rigidzelda_game)
    g.buildLevel(zelda_level2)
    env = GameEnvironment(g, visualize=False,
                          recordingEnabled=True, actionDelay=150)
    C = MDPconverter(g, env=env, verbose=True)
    Ts, R, _ = C.convert()
    print C.states
    print Ts[0]
    print R
    env.reset()
    agent = PolicyDrivenAgent.buildOptimal(env)
    env.visualize = True
    env.reset()
    task = GameTask(env)    
    exper = EpisodicExperiment(task, agent)
    exper.doEpisodes(1)
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 35, Source: interfaces.py


Example 4: test4

def test4():
    from numpy import ndarray
    from examples.gridphysics.mazes import polarmaze_game
    from pybrain.optimization import SNES, WeightGuessing
    g = VGDLParser().parseGame(polarmaze_game)
    g.buildLevel(labyrinth2)
    game_env = GameEnvironment(g)
    net = buildNet(game_env.outdim, 5, 4, temperature=0.1, recurrent=False)
    
    algo = SNES(lambda x: someEpisodes(game_env, x, avgOver=3), net, verbose=True, desiredEvaluation=0.75)
    #algo = WeightGuessing(lambda x: someEpisodes(game_env, x), net, verbose=True, desiredEvaluation=0.78)
    rows, cols = 2,2
    episodesPerStep = 4
    for i in range(rows*cols):
        pylab.subplot(rows, cols, i+1)
        algo.learn(episodesPerStep)
        if isinstance(algo.bestEvaluable, ndarray):
            net._setParameters(algo.bestEvaluable)
        else:
            net = algo.bestEvaluable
        plotBackground(game_env)    
        plotTrajectories(game_env, net)
        pylab.title(str((i+1)*episodesPerStep))
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
        print
    pylab.show()
Developer: chongdashu, Project: py-vgdl, Lines of code: 27, Source: nomodel_pomdp.py


Example 5: testRolloutVideo

def testRolloutVideo(actions=[0, 0, 2, 2, 0, 3] * 2):        
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1
    from vgdl.core import VGDLParser
    from vgdl.tools import makeGifVideo
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    makeGifVideo(GameEnvironment(g, visualize=True), actions)
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 8, Source: interfaces.py


Example 6: testRollout

def testRollout(actions=[0, 0, 2, 2, 0, 3] * 20):        
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1
    from vgdl.core import VGDLParser
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)    
    env = GameEnvironment(g, visualize=True, actionDelay=100)
    env.rollOut(actions)
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 8, Source: interfaces.py


Example 7: _createVGDLGame

def _createVGDLGame( gameSpec, levelSpec ):
    import uuid
    from vgdl.core import VGDLParser
    # parse, run and play.
    game = VGDLParser().parseGame(gameSpec)
    game.buildLevel(levelSpec)
    game.uiud = uuid.uuid4()
    return game
Developer: dylski, Project: py-vgdl, Lines of code: 8, Source: rlenvironment.py


Example 8: test2

def test2():
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1
    from vgdl.core import VGDLParser
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)    
    actions = [1, 0, 0, 3, 0, 2, 0, 2, 0, 0, 0]
    env = GameEnvironment(g, visualize=True, actionDelay=100)
    env.rollOut(actions)
    env.reset()
    senv = SubjectiveGame(g, actionDelay=1500)
    senv.rollOut(actions)
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 12, Source: subjective.py


Example 9: testStochMaze

def testStochMaze():
    from vgdl.core import VGDLParser
    from examples.gridphysics.mazes.stochastic import stoch_game, stoch_level
    g = VGDLParser().parseGame(stoch_game)
    g.buildLevel(stoch_level)
    C = MDPconverter(g, verbose=True)
    Ts, R, fMap = C.convert()
    print C.states
    print R
    for T in Ts:
        print T
    print fMap
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 12, Source: mdpmap.py


Example 10: testMaze

def testMaze():
    from vgdl.core import VGDLParser
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    C = MDPconverter(g, verbose=True)
    Ts, R, fMap = C.convert()
    print C.states
    print R
    for T in Ts:
        print T
    print fMap
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 13, Source: mdpmap.py


Example 11: test4

def test4():
    """ Same thing, but animated. """
    from examples.gridphysics.mazes.windy import windy_stoch_game, windy_level
    from pybrain.rl.experiments.episodic import EpisodicExperiment
    from vgdl.interfaces import GameEnvironment, GameTask
    from vgdl.agents import PolicyDrivenAgent 
    g = VGDLParser().parseGame(windy_stoch_game)
    g.buildLevel(windy_level)
    env = GameEnvironment(g, visualize=True, actionDelay=100)
    task = GameTask(env)
    agent = PolicyDrivenAgent.buildOptimal(env)
    exper = EpisodicExperiment(task, agent)
    res = exper.doEpisodes(5)
    print res
Developer: chongdashu, Project: py-vgdl, Lines of code: 14, Source: model_mdp.py


Example 12: testPolicyAgent

def testPolicyAgent():
    from pybrain.rl.experiments.episodic import EpisodicExperiment
    from vgdl.core import VGDLParser
    from examples.gridphysics.mazes import polarmaze_game, maze_level_2
    from vgdl.agents import PolicyDrivenAgent
    game_str, map_str = polarmaze_game, maze_level_2
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    
    env = GameEnvironment(g, visualize=False, actionDelay=100)
    task = GameTask(env)
    agent = PolicyDrivenAgent.buildOptimal(env)
    env.visualize = True
    env.reset()
    exper = EpisodicExperiment(task, agent)
    res = exper.doEpisodes(2)
    print res
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 17, Source: interfaces.py


Example 13: test1

def test1():
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1    
    
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    
    game_env = GameEnvironment(g)
    print 'number of observations:', game_env.outdim
    
    net = buildNet(game_env.outdim, 2, 2)
    for i in range(200):
        net.randomize()
        net.reset()
        print someEpisodes(game_env, net),
        if i% 20 == 19:
            print
Developer: chongdashu, Project: py-vgdl, Lines of code: 17, Source: nomodel_pomdp.py


Example 14: test3

def test3():
    from examples.gridphysics.mazes import polarmaze_game
    from examples.gridphysics.mazes.simple import maze_level_1b
    from vgdl.core import VGDLParser
    from pybrain.rl.experiments.episodic import EpisodicExperiment
    from vgdl.interfaces import GameTask
    from vgdl.agents import InteractiveAgent, UserTiredException
    game_str, map_str = polarmaze_game, maze_level_1b
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)    
    senv = SubjectiveGame(g, actionDelay=100, recordingEnabled=True)
    #senv = GameEnvironment(g, actionDelay=100, recordingEnabled=True, visualize=True)
    task = GameTask(senv)    
    iagent = InteractiveAgent()
    exper = EpisodicExperiment(task, iagent)
    try:
        exper.doEpisodes(1)
    except UserTiredException:
        pass
    print senv._allEvents
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 20, Source: subjective.py


Example 15: plotOptimalValues

def plotOptimalValues(gametype, layout, discountFactor=0.9, showValue=False):
    # build the game
    g = VGDLParser().parseGame(gametype)
    g.buildLevel(layout)
    
    # transform into an MDP
    C = MDPconverter(g)
    Ts, R, _ = C.convert()
    
    # find the optimal policy
    _, Topt = policyIteration(Ts, R, discountFactor=discountFactor)
    
    # evaluate the policy
    Vopt = trueValues(Topt, R, discountFactor=discountFactor)
        
    # plot those values    
    featurePlot((g.width, g.height), C.states, Vopt, plotdirections=True)
    
    if showValue:
        # expected discounted reward at initial state
        Vinit = Vopt[C.initIndex()]
        pylab.xlabel("V0=%.4f"%Vinit)
Developer: chongdashu, Project: py-vgdl, Lines of code: 22, Source: model_mdp.py


Example 16: testInteractions

def testInteractions():
    from random import randint
    from pybrain.rl.experiments.episodic import EpisodicExperiment
    from vgdl.core import VGDLParser
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1    
    from pybrain.rl.agents.agent import Agent
    
    class DummyAgent(Agent):
        total = 4
        def getAction(self):
            res = randint(0, self.total - 1)
            return res    
        
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    
    env = GameEnvironment(g, visualize=True, actionDelay=100)
    task = GameTask(env)
    agent = DummyAgent()
    exper = EpisodicExperiment(task, agent)
    res = exper.doEpisodes(2)
    print res
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 23, Source: interfaces.py


Example 17: testRecordingToGif

def testRecordingToGif(human=False):
    from pybrain.rl.experiments.episodic import EpisodicExperiment
    from vgdl.core import VGDLParser
    from examples.gridphysics.mazes import polarmaze_game, maze_level_2
    from vgdl.agents import PolicyDrivenAgent, InteractiveAgent
    from vgdl.tools import makeGifVideo
    
    game_str, map_str = polarmaze_game, maze_level_2
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    env = GameEnvironment(g, visualize=human, recordingEnabled=True, actionDelay=200)
    task = GameTask(env)
    if human:
        agent = InteractiveAgent()
    else:
        agent = PolicyDrivenAgent.buildOptimal(env)
    exper = EpisodicExperiment(task, agent)
    res = exper.doEpisodes(1)
    print res
    
    actions = [a for _, a, _ in env._allEvents]
    print actions
    makeGifVideo(env, actions, initstate=env._initstate)
Developer: sarobe, Project: VGDLEntityCreator, Lines of code: 23, Source: interfaces.py


Example 18: test3

def test3():
    from examples.gridphysics.mazes.simple import office_layout_2, consistent_corridor
    from examples.gridphysics.mazes import polarmaze_game
    from pybrain.optimization import SNES
    g = VGDLParser().parseGame(polarmaze_game)
    g.buildLevel(consistent_corridor)
    game_env = GameEnvironment(g)
    net = buildNet(game_env.outdim, 4, 4, temperature=0.05, recurrent=False)
    
    algo = SNES(lambda x: someEpisodes(game_env, x), net, verbose=True, desiredEvaluation=0.78)
    rows, cols = 2,2
    episodesPerStep = 3
    for i in range(rows*cols):
        pylab.subplot(rows, cols, i+1)
        algo.learn(episodesPerStep)
        net._setParameters(algo.bestEvaluable)
        plotBackground(game_env)    
        plotTrajectories(game_env, net)
        pylab.title(str((i+1)*episodesPerStep))
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
        print
    pylab.show()
Developer: chongdashu, Project: py-vgdl, Lines of code: 23, Source: nomodel_pomdp.py


Example 19: test2

def test2():
    from examples.gridphysics.mazes import polarmaze_game, maze_level_1    
    from pybrain.optimization import SNES
    
    game_str, map_str = polarmaze_game, maze_level_1
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    game_env = GameEnvironment(g, actionDelay=100, recordingEnabled=True)
    net = buildNet(game_env.outdim, 6, 2)
    
    algo = SNES(lambda x: someEpisodes(game_env, x), net, verbose=True, desiredEvaluation=0.43)
    rows, cols = 3,3
    episodesPerStep = 2
    for i in range(rows*cols):
        pylab.subplot(rows, cols, i+1)
        algo.learn(episodesPerStep)
        net._setParameters(algo.bestEvaluable)
        plotBackground(game_env)    
        plotTrajectories(game_env, net)
        pylab.title(str((i+1)*episodesPerStep))
        if algo.desiredEvaluation <= algo.bestEvaluation:
            break
        print
    pylab.show()
Developer: chongdashu, Project: py-vgdl, Lines of code: 24, Source: nomodel_pomdp.py


Example 20: testLoadSave

def testLoadSave():
    from vgdl.core import VGDLParser
    from examples.gridphysics.aliens import aliens_level, aliens_game
        
    map_str, game_str = aliens_level, aliens_game
    g = VGDLParser().parseGame(game_str)
    g.buildLevel(map_str)
    
    for _ in range(1000):
        s = g.getFullState()
        g.setFullState(s)
Developer: chongdashu, Project: py-vgdl, Lines of code: 11, Source: speed_test.py



Note: the vgdl.core.VGDLParser class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects; copyright remains with the original authors, and redistribution and use should follow each project's license. Please do not repost without permission.

