
Python simulator.Simulator Class Code Examples


This article collects typical usage examples of the simulator.Simulator class in Python. If you are wondering what simulator.Simulator is for, how to use it, or want to see it applied in real code, the curated class examples below should help.



The following presents 20 code examples of the Simulator class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
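Many of the examples below come from variants of Udacity's "smartcab" project and share one skeleton: build an Environment, attach a learning agent, register it as the primary agent, wrap everything in a Simulator, and run a fixed number of trials. For orientation, here is a minimal sketch of that recurring pattern; it assumes the smartcab project's modules (environment, agent, simulator) and the APIs seen in the examples, not a standard library:

# Minimal sketch of the recurring smartcab pattern (project modules assumed).
from environment import Environment  # project module, not stdlib (assumed path)
from simulator import Simulator      # the class these examples demonstrate
from agent import LearningAgent      # project-specific agent (assumed path)

def run_basic():
    e = Environment()                        # world, also adds dummy traffic
    a = e.create_agent(LearningAgent)        # construct the agent inside the env
    e.set_primary_agent(a, enforce_deadline=True)  # track this agent's trials
    sim = Simulator(e, update_delay=0.0, display=False)  # headless and fast
    sim.run(n_trials=100)                    # run 100 trials

Each example below varies this skeleton: sweeping learning parameters, collecting success statistics, or substituting a project-specific simulator of the same name.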

Example 1: run2

def run2():  # helps to find the sweet spot for alpha, gamma values

    alphas = [0.1, 0.2, 0.4, 0.6, 0.8, 1.0]
    gammas = [0.1, 0.2, 0.4, 0.6, 0.8, 1.0]
    heatmap = []

    for i, alpha in enumerate(alphas):
        row = []
        for j, gamma in enumerate(gammas):
            e = Environment()
            a = e.create_agent(LearningAgent)
            a.alpha = alpha
            a.gamma = gamma

            e.set_primary_agent(a, enforce_deadline=True)
            sim = Simulator(e, update_delay=0.0, display=False)
            sim.run(n_trials=100)
            print "Successful journeys : {}".format(a.targetReachedCount)
            row.append(a.targetReachedCount / 100.0)
            #qstats.append(a.q_learn_stats())
        heatmap.append(row)

    print heatmap
    ax = sns.heatmap(heatmap, xticklabels=gammas, yticklabels=alphas, annot=True)
    ax.set(xlabel="gamma", ylabel="alpha")
    plt.show()
Developer: Suyyala, Project: machine-learning, Lines: 26, Source: agent.py


Example 2: run_episode

 def run_episode(self, simulator=None):
     ''' Run a single episode for a maximum number of steps. '''
     if simulator is None:
         simulator = Simulator()
     state = simulator.get_state()
     states = [state]
     rewards = []
     actions = []
     end_ep = False
     act = self.action_policy(state)
     acts = [act]
     while not end_ep:
         action = self.policy(state, act)
         new_state, reward, end_ep, steps = simulator.take_action(action)
         new_act = self.action_policy(new_state)
         delta = reward - self.state_quality(state, act)
         if not end_ep:
             delta += (self.gamma**steps) * self.state_quality(new_state, new_act)
         self.tdiff += abs(delta)
         self.steps += 1.0
         state = new_state
         states.append(state)
         actions.append(action)
         rewards.append(reward)
         act = new_act
         acts.append(act)
     self.tdiffs.append(self.tdiff / self.steps)
     self.episodes += 1
     self.total += sum(rewards)
     self.returns.append(sum(rewards))
     return states, actions, rewards, acts
Developer: WarwickMasson, Project: aaai-platformer, Lines: 31, Source: learn.py


Example 3: level_2

class level_2(unittest.TestCase):

    def setUp(self):
        self.game = Game()
        self.game.addSquare(Square(Color.blue, Direction.top, 2, 1))
        self.game.board.setColor(0, 1, Color.blue)
        self.game.addSquare(Square(Color.red, Direction.right, 0, 0))
        self.game.board.setColor(0, 2, Color.red)
        self.game.addSquare(Square(Color.grey, Direction.left, 1, 3))
        self.game.board.setColor(1, 1, Color.grey)

        self.simulator = Simulator(self.game)

    def test_goal(self):
        self.game.moveSquare(Color.red)
        self.assertTrue(not self.game.isDone())
        self.game.moveSquare(Color.red)
        self.assertTrue(not self.game.isDone())
        self.game.moveSquare(Color.blue)
        self.assertTrue(not self.game.isDone())
        self.game.moveSquare(Color.blue)
        self.assertTrue(not self.game.isDone())
        self.game.moveSquare(Color.grey)
        self.assertTrue(not self.game.isDone())
        self.game.moveSquare(Color.grey)
        self.assertTrue(self.game.isDone())

    def test_simulation(self):
        print self.simulator.find_solution()
Developer: haudren, Project: squares, Lines: 29, Source: tests.py


Example 4: run

def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.00001, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
    
    # Print summary #
    allPenalties = a.numberOfPenaltiesList
    allFailures = a.numberOfFailuresList
    numberOfTrials = float(len(allFailures))
    numberOfFailures = float(allFailures[-1])
    numberOfSuccess = numberOfTrials - numberOfFailures
    numberOfSuccessFirstHalf = ((numberOfTrials) / 2) - float(allFailures[len(allFailures)/2])
    numberOfSuccessSecondHalf = numberOfSuccess - numberOfSuccessFirstHalf
    print ("=================================================================================")
    print ("SUMMARY")
    print ("=================================================================================")
    print ("Total Penalities received = %3.2f" % (sum(allPenalities)))
    print ("\tPenalities received in the first half of trials  = %3.2f" % (sum(allPenalities[:len(allPenalities)/2])))
    print ("\tPenalities received in the second half of trials = %3.2f" % (sum(allPenalities[len(allPenalities)/2:])))
    print ("Success Rate: %3.2f%%" % (numberOfSuccess/numberOfTrials*100))
    print ("\tSuccess Rate of the first half : %3.2f%%" % (numberOfSuccessFirstHalf/(numberOfTrials/2)*100))
    print ("\tSuccess Rate of the second half: %3.2f%%" % (numberOfSuccessSecondHalf/(numberOfTrials/2)*100))
Developer: ZAZAZakari, Project: udacity, Lines: 33, Source: agent.py


Example 5: run

def run(msg = ''):
    """Run the agent for a finite number of trials."""

    # set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: you can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: to speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: to quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    results = a.results
    average_cycles = mean([result[0] for result in results])
    average_reward = mean([result[1] for result in results])
    average_violations = mean([result[2] for result in results])
    # print '=' * 10, msg
    # print 'Average Cycles:', average_cycles
    # print 'Average Reward:', average_reward
    # print 'Average Violations:', average_violations

    return average_cycles, average_reward, average_violations
Developer: allanbreyes, Project: smartcab, Lines: 26, Source: agent.py


Example 6: main

def main():
    drone = RealDrone()
    # controller = ConConController(drone=drone,
            # log=True)
    controller = SingleAxisController(drone=drone, log=True)
    sim = Simulator(drone=drone, controller=controller)
    sim.start()
Developer: EECampCaoCao, Project: EECampTeaching, Lines: 7, Source: real.py


Example 7: run

def run():
    """Run the agent for a finite number of trials."""
    # create output file
    target_dir = os.path.dirname(os.path.realpath(__file__))
    target_path = os.path.join(target_dir, 'qlearning_tuning_report.txt')
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # loop over the parameters
    for epsilon in [0.1, 0.5, 0.9]:
        for alpha in np.arange(0.1, 1, 0.2):
            for gamma in np.arange(0.1, 1, 0.2):
                print epsilon, alpha, gamma
                # Set up environment and agent
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(QAgent, epsilon, alpha, gamma)  # create agent
                e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                # Now simulate it
                sim = Simulator(e, update_delay=0.001, display=False)  # create simulator (uses pygame when display=True, if available)
                # NOTE: To speed up simulation, reduce update_delay and/or set display=False
                sim.run(n_trials=100)  # run for a specified number of trials
                # get the count of successful trials and the average running time
                summary = sim.report()

                # write out the results (the with-block closes the file automatically)
                with open(target_path, 'a') as f:
                    f.write('epsilon {}, alpha {}, gamma {} : success {}, avg_time {}, total_reward {}\n'.format(epsilon, alpha, gamma, summary[0], summary[1], round(a.total_reward, 3)))
Developer: ibowen, Project: Machine-Learning-Engineer-Nanodegree, Lines: 32, Source: agent4.py


Example 8: run

def run():
    """Run the agent for a finite number of trials."""
    random.seed(42)
    if False:  # save output
        f = open('out', 'w')
    else:
        f = StringIO.StringIO()
    alphas = [0.1]
    gammas = [0.1]
    epsilons = [0.1]
    for alpha in alphas:
        for gamma in gammas:
            for epsilon in epsilons:
                # Set up environment and agent
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(LearningAgent, alpha = alpha, epsilon = epsilon, gamma = gamma)  # create agent
                e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                # Now simulate it
                sim = Simulator(e, update_delay=0.0, display=False)  # create simulator (uses pygame when display=True, if available)
                # NOTE: To speed up simulation, reduce update_delay and/or set display=False

                sim.run(n_trials=100)  # run for a specified number of trials
                # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
                f.write('Alpha: {} Gamma: {}, Epsilon: {}, RESULTS: {}\n'.format(alpha, gamma, epsilon, sum(a.history)))
                f.write('Number of states seen: {}\n'.format(len(a.Q)))
                f.write('History of results\n')
                f.write(str(a.history))
                f.write('State frequencies:\n')
                f.write('\n'.join(str(z) for z in sorted(a.s.items(), key=lambda x: x[1])))
                f.write('\n\n')
Developer: dgarwin, Project: machine-learning, Lines: 32, Source: agent.py


Example 9: main

def main():
    """ Example: UnitXObjectの変数を保存し,取り出し,確認する.
    """
    from simulator import Simulator
    s = Simulator()
    UnitXObject.manager = s.get_manager()
    UnitXObject.scopes = s.get_scopes()
    
    # Register part
    crr_scope = s.get_scopes().peek()
    crr_scope['x'] = UnitXObject(value=1.5, varname='x', is_none=False, unit=Unit(ex_numer=u'm', numer=u'cm', ex_denom=None, denom=None))
    crr_scope['y'] = UnitXObject(value=1500, varname='y', is_none=False, unit=Unit(ex_numer=u'm', numer=u'km', ex_denom=u'時', denom=u'分'))
    s.get_scopes().new_scope()
    
    # Find & Show part
    found_scope = s.get_scopes().peek().find_scope_of('x')
    Util.dump(s.get_scopes())

    # Checking equals()
    tmp_obj = UnitXObject(value=1.5, varname='x', is_none=False, unit=Unit(ex_numer=None, numer=u'cm', ex_denom=None, denom=None))
    print tmp_obj
    print crr_scope['x'] == tmp_obj

    # Clear part
    s.get_scopes().del_scope()
    s.get_scopes().del_scope()
    return Constants.EXIT_SUCCESS
Developer: supertask, Project: UnitX, Lines: 27, Source: unitx_object.py


Example 10: run

def run(get_result = False, gm = 0.2, al = 0.5):
    """Run the agent for a finite number of trials."""
    if get_result:
        ## print for GridSearch
        print ("Running trial  for gamma = %.1f, alpha = %.1f" %(gm, al))

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent, gm = gm, al = al)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    n_trials = 100
    sim.run(n_trials=n_trials)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    print "average silly moves for the last 10 trials: ", np.average(a.silly_fq[-10])
    print "average risky moves for the last 10 trials: ", np.average(a.risk_fq[-10])


    """The Following Code is for GridSearch"""
    if get_result:
        summary = sim.rep.summary()
        rate = sum(summary[-1][-10:])/float(10)
        deadline = sum(summary[-2][-10:])/float(10)
        risk_fq = sum(a.risk_fq[-10:])
        print ("success_rate   for gamma = %.1f, alpha = %.1f is %.2f" %(gm, al, rate))
        print ("final_deadline for gamma = %.1f, alpha = %.1f is %.2f" %(gm, al, deadline))
        print ("risk_frequecy  for gamma = %.1f, alpha = %.1f is %d" %(gm, al, risk_fq))
        print
        return (rate, deadline, risk_fq)
Developer: HoijanLai, Project: MLND, Lines: 35, Source: agent.py


Example 11: run

def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # set agent to track

    # Now simulate it
    sim = Simulator(e, update_delay=5.0)  # reduce update_delay to speed up simulation
    sim.run(n_trials=100)  # press Esc or close pygame window to quit

    fig, ax = plt.subplots( nrows=1, ncols=1) 
    plt.xlabel('Order of trials')
    plt.ylabel('# of incurred penalties')
    plt.title('Penalties')
    ax.plot(a.records)
    fig.savefig('penalties.png')

    fig, ax = plt.subplots( nrows=1, ncols=1) 
    plt.xlabel('Order of trials')
    plt.ylabel('# of rewards')
    plt.title('Rewards')
    ax.plot(a.rewards)
    fig.savefig('rewards.png')
Developer: ArthurLu, Project: MLND.Project4, Lines: 25, Source: agent.py


Example 12: run

def run():
    """Run the agent for a finite number of trials."""
    successnum = dict()
    for i in range(10, 36,10):
        for j in range(40,71,10):
            for k in range(6,16,4):
                arguments = (i/100.0, j/100.0, k/100.0)
                tenSucc = []
                for index in range(0, 5):
                    # Set up environment and agent
                    e = Environment()  # create environment (also adds some dummy traffic)
                    a = e.create_agent(LearningAgent, arguments)  # create agent
                    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                    # Now simulate it
                    sim = Simulator(e, update_delay=0.001, display=False)  # create simulator (uses pygame when display=True, if available)
                    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

                    sim.run(n_trials=100)  # run for a specified number of trials
                    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
                    tenSucc.append(e.success)
                successnum[arguments] = tenSucc

    print(successnum)
Developer: danache, Project: ML_Udacity, Lines: 25, Source: agent.py


Example 13: Launcher

class Launcher(object):
  
  def setup_logging(self):
    t = datetime.now()
    self.tstamp = '%d-%d-%d-%d-%d' % (t.year, t.month, t.day, t.hour, t.minute)
    fname = LOG_FILE_PATH + LOG_FILENAME + self.tstamp + '.log'    
    logging.basicConfig(filename=fname,level=logging.INFO,format=FORMAT)  
  
  def configure(self, p):
    print('constructing simulator')
    self.sim = Simulator(p['ins'], p['strat'], p['start_date'], p['end_date'], p['open_bal'], self.tstamp)

  def simulate(self):
    print('running simulator')
    start = clock()
    self.sim.run()
    end = clock()
    dur_str = 'seconds = %f' % (end - start)
    print(dur_str)
    logging.info('sim time = ' + dur_str)

  def report(self):
    print('plotting')
    start = clock()
    self.sim.plot()
    end = clock()
    dur_str = 'seconds = %f' % (end - start)
    print(dur_str)
    logging.info('plot time = ' + dur_str)

  def go(self, p):
    self.setup_logging()
    self.configure(p)
    self.simulate()
    self.report()
Developer: davidbarkhuizen, Project: simagora, Lines: 35, Source: launcher.py


Example 14: run

def run():
    """Run the agent for a finite number of trials."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=100)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
    num_successes = np.sum(a.successes)
    last_failure = a.find_last_failure()
    total_penalty = a.cumulative_penalties
    avg_time_remaining = np.mean(a.all_times_remaining)

    print "Total number of successes: {}".format(num_successes)
    print "Failure last occurred at trial: {}".format(last_failure)
    print 'Total penalties incurred: {}'.format(total_penalty)
    print "Average time remaining: {}".format(avg_time_remaining)


    for state in a.state_q_dict:
        print state
        for action in a.state_q_dict[state]:
            print "Action: {}, Q: {:2f}".format(action,a.state_q_dict[state][action])

    print a.state_q_dict[('right','red',None,None,None)]
    
    return (num_successes,last_failure,total_penalty,avg_time_remaining)
Developer: btaborsky, Project: machine-learning, Lines: 34, Source: agent.py


Example 15: main2

def main2():
  """
  Looks at which games are possible after a certain number of differences.
  """
  GAME_LENGTH = 16
  # possible_tuples[i] is the set of tuples for which there exists a game
  # whose ith element is that tuple
  possible_tuples = []
  for n in range(0, 2 ** GAME_LENGTH):
    g = int_to_game(n, GAME_LENGTH)
    sim = Simulator(g)
    assert sim.get_game_length() is not None, 'Non-terminating game: %s' % g
    t = 0
    while not sim.done():
      if t >= len(possible_tuples):
        possible_tuples.append(set())
      possible_tuples[t].add(tuple(sim.state))
      sim.step_forward()
      t += 1
    # Add ending tuple as well
    if t >= len(possible_tuples):
      possible_tuples.append(set())
    possible_tuples[t].add(tuple(sim.state))
  print 'Number of possible tuples after t steps:'
  for t in range(len(possible_tuples)):
    print '%d: %d' % (t, len(possible_tuples[t]))
Developer: mikemeko, Project: 18.821-projects, Lines: 26, Source: pow_two_games.py


Example 16: update

 def update(self):
     ''' Learn for a single episode. '''
     simulator = Simulator()
     state = simulator.get_state()
     act = self.action_policy(state)
     feat = self.action_features[act](state)
     end_episode = False
     traces = [
         np.zeros((BASIS_COUNT,)),
         np.zeros((BASIS_COUNT,)),
         np.zeros((BASIS_COUNT,))]
     while not end_episode:
         action = self.policy(state, act)
         state, reward, end_episode, _ = simulator.take_action(action)
         new_act = self.action_policy(state)
         new_feat = self.action_features[new_act](state)
         delta = reward + self.gamma * self.action_weights[new_act].dot(new_feat) - self.action_weights[act].dot(feat)
         for i in range(3):
             traces[i] *= self.lmb * self.gamma
         traces[act] += feat
         for i in range(3):
             self.action_weights[i] += self.alpha * delta * traces[i] / COEFF_SCALE
         act = new_act
         feat = new_feat
     return [reward]
Developer: WarwickMasson, Project: aaai-goal, Lines: 25, Source: learn.py


Example 17: mainQ

def mainQ(_learning=True):
    # Set player types and logging if provided in command line
    if len(sys.argv) == 3:
        pair = (sys.argv[1], sys.argv[2])
    else:
        pair = None

    # Prompt players
    # Needs to be adapted to define the parameters
    player_pair = promptPlayers(pair, _learning)
    # Create new game
    game = Game(player_pair)

    ######
    # Create new simulation
    # Flags:
    #   - debug: (True, False)
    sim = Simulator(game)


    ######
    # Run a simulation
    # Flags:
    # - tolerance=0.05  Epsilon tolerance at which testing begins.
    # - n_test=0  Number of tests to be conducted after training

    sim.run(tolerance=0.001, n_test=100)
Developer: armandosrz, Project: UdacityNanoMachine, Lines: 27, Source: main.py


Example 18: run

def run():
    """Run the agent for a finite number of trials."""

    options = parseOptions()

    env = Environment()  # create environment (also adds some dummy traffic)
    sim = Simulator(env, update_delay=0, display=options.display) # create simulator (uses pygame when display=True, if available)

    results = {}

    from settings import params
    for agent, symbol in [(options.player1, 1), (options.player2, -1)]:
        kwargs = params[agent]
        env.add_agent(
            symbol=symbol, 
            file=options.file, 
            clear=options.clear,
            save=options.save,
            **kwargs)

    sim.run(n_trials=options.iterations)  # run for a specified number of trials

    for agent in env.agents:
        results["X" if agent.symbol == 1 else 'O'] = agent.wins

    print results

    dispatcher.send(signal='main.complete', sender={})
Developer: mleonardallen, Project: machine-learning, Lines: 28, Source: main.py


Example 19: run

def run():
    """Run the agent for a finite number of trials."""
    
    # create common place to set debug values
    dbg_deadline = True
    dbg_update_delay = 0.01
    dbg_display = False
    dbg_trials = 100 
    
    # create switches to run as random, way_light, way_light_vehicles
    # random = take random actions only
    # way_light_only = Traffic Light, Way Point
    # way_light_Vehicle = Traffic Light, Way Point, Left, Right, Oncoming
    # way_light_modified (or any other value) = Way Point, Combination Light and Vehicle State
    dbg_runtype = 'way_light_only'

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(LearningAgent)  # create agent
    # set the run type (random choice, simple state, state with vehicles)
    a.run_type = dbg_runtype
    e.set_primary_agent(a, enforce_deadline=dbg_deadline)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=dbg_update_delay, display=dbg_display)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=dbg_trials)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line

    # at the end of the simulation show results
    # call qlearner reset to get last trial result
    a.q_learner.reset(a.step_count)
    a.q_learner.show_results()
Developer: escolmebartlebooth, Project: mlnd-5-smartcab, Lines: 35, Source: agent.py


Example 20: start_puzzle

  def start_puzzle(self):
    while(not self.solved):
      while(True):
        response_code = self.code
        for i, l in enumerate(self.lines):
          clear()
          put_text(self.lesson)
          print_code(response_code, "\nThe code currently is:")
          resp = int(get_text('Place the line \'%s\': ' % l))
          response_code = self.process_input(resp, l, response_code)
        threads = self.translator(response_code)
        simulator = Simulator(threads, self.predicate, self.semaphores, self.poll_rate)
        success, message = simulator.run_sim()
        simulator.visualize()
        if success:
          put_text('Simulator test Passed!')
        else:  
          put_text('Simulator test Failed!')
        put_text(message)
        get_text('Check against the real answer? (y/n)')

        clear()
        put_text(self.lesson)
        print_code(response_code, "\nThe code currently is:")

        if(response_code == self.answer):
          put_text("Congratulations! That's correct. Good job!\n")
          break
        else:
          get_text("Woops! That's incorrect. Try again? (y/n)\n")
      self.solved = True
Developer: dchen741, Project: TaDa, Lines: 31, Source: puzzle.py



Note: the simulator.Simulator class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Please do not reproduce without permission.

