本文整理汇总了Python中search.aStarSearch函数的典型用法代码示例。如果您正苦于以下问题:Python aStarSearch函数的具体用法?Python aStarSearch怎么用?Python aStarSearch使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了aStarSearch函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: foodHeuristic
def foodHeuristic(state, problem):
    """
    Heuristic for the FoodSearchProblem.

    Estimates remaining cost as the longest exact maze distance from
    Pacman's current position to any remaining food pellet, where each
    distance is computed by running A* on a single-goal
    PositionSearchProblem (expensive, but admissible/consistent since it
    lower-bounds the cost of eating the farthest pellet).

    state: a (pacmanPosition, foodGrid) tuple, foodGrid being a Grid of
    booleans (see game.py).
    """
    position, foodGrid = state
    farthest = 0
    for pellet in foodGrid.asList():
        sub_problem = PositionSearchProblem(
            problem.startingGameState,
            start=position,
            goal=pellet,
            warn=False,
            visualize=False,
        )
        # Path length == maze distance to this pellet.
        farthest = max(farthest, len(search.aStarSearch(sub_problem, manhattanHeuristic)))
    return farthest
开发者ID:larabear,项目名称:AI-Pacman-project,代码行数:34,代码来源:searchAgents.py
示例2: findPathToClosestDot
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.

    Relies on AnyFoodSearchProblem (which, per its name, presumably
    accepts any food cell as a goal — verify in searchAgents.py), so a
    single A* call with the default heuristic yields the nearest dot.
    """
    # The original carried a large commented-out (triple-quoted string)
    # draft that scanned the food grid manually; it was dead code and has
    # been removed, along with the unused position/food/walls lookups.
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
开发者ID:acihla,项目名称:artificialintelligencealgs,代码行数:25,代码来源:searchAgents.py
示例3: getAction
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}

    Returns the next action from the cached plan, replanning when the
    plan is exhausted: with 20 or fewer dots left, a full A* search on a
    FoodSearchProblem; otherwise BFS to the nearest dot.
    """
    if not self.answer:
        # Original set self.time = 1 and immediately tested
        # `self.time == 1` — an always-true, dead condition. The
        # attribute write is kept in case other code reads it.
        self.time = 1
        if state.getFood().count() <= 20:
            # Few enough dots that exact food-clearing A* is affordable.
            self.answer = search.aStarSearch(FoodSearchProblem(state), foodHeuristic)
        else:
            self.answer = search.bfs(AnyFoodSearchProblem(state))
    # Pop and return the first queued action (single copy of the logic
    # the original repeated three times).
    answer = self.answer[0]
    self.answer = self.answer[1:]
    return answer
开发者ID:NobodyInAmerica,项目名称:PacMan-AI,代码行数:28,代码来源:searchAgents.py
示例4: pathToClosestFood
def pathToClosestFood(self, gameState, cRegion, fRegion):
    """
    Plan a route toward food using the region-restricted problem.

    Builds an ApproximateSearchProblem over the given close/far regions
    and solves it with A* and ApproximateHeuristic, returning the full
    action list.
    """
    problem = ApproximateSearchProblem(gameState, close_region=cRegion, far_region=fRegion)
    action = search.aStarSearch(problem, ApproximateHeuristic)
    # Parenthesized print works identically on Python 2 (single argument)
    # and is required on Python 3; the py2-only statement form removed.
    print("Actions: " + str(action))
    return action
开发者ID:jeremyrios,项目名称:CS188-Artifical-Intelligence,代码行数:7,代码来源:searchAgents.py
示例5: getAction
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}

    Re-solves the stored problem (self.prob) with A* + foodHeuristic on
    every call and returns the resulting action sequence unchanged.
    """
    plan = search.aStarSearch(self.prob, foodHeuristic)
    return plan
开发者ID:omerzk,项目名称:AI,代码行数:7,代码来源:searchAgents.py
示例6: getAction
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}

    Plans over a fresh FoodSearchProblem with A* (approxHeuristic) and
    returns only the first action of the plan.
    """
    food_problem = FoodSearchProblem(state)
    plan = search.aStarSearch(food_problem, approxHeuristic)
    first_step = plan[0]
    return first_step
开发者ID:BazzalSeed,项目名称:webDevelopment,代码行数:8,代码来源:searchAgents.py
示例7: findPathToClosestDot
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.

    AnyFoodSearchProblem encapsulates all needed game state, so A* with
    the default heuristic suffices.
    """
    # The position/food/walls lookups suggested by the template were
    # unused and have been removed.
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
开发者ID:tomselvi,项目名称:188-proj1,代码行数:8,代码来源:searchAgents.py
示例8: getAction
def getAction(self, state):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}

    Note: returns the entire A* action list for a FoodSearchProblem,
    exactly as search.aStarSearch produces it.
    """
    return search.aStarSearch(FoodSearchProblem(state))
开发者ID:Ray-Zhang,项目名称:artificial_intelligence_practice,代码行数:9,代码来源:searchAgents.py
示例9: registerInitialState
def registerInitialState(self, state):
    """
    Called once before any moves are made.

    Precomputes the corner-visiting plan with A* + cornersHeuristic and
    initializes the bookkeeping attributes used during play.
    """
    corners_problem = CornersProblem(state)
    self.answer = search.aStarSearch(corners_problem, cornersHeuristic)
    self.secondAnswer = []
    self.time = 0
    self.initialFoodCount = state.getFood().count()
开发者ID:NobodyInAmerica,项目名称:PacMan-AI,代码行数:9,代码来源:searchAgents.py
示例10: test_tetris
def test_tetris(ntrial=10, lookahead=1, heuristic=evaluate_state, watchGames=False, verbose=False):
    """
    Test harness

    Plays `ntrial` Tetris games by repeatedly running A* with the given
    heuristic, optionally replaying each game on screen, logging each
    game's grids to gameLogs/, and reporting lines cleared per game and
    in total.

    Note: `lookahead` is 1-based — 1 means no lookahead, 2 means one
    extra piece, and so on (see the validation below).
    """
    if lookahead < 1:
        print "Bad Lookahead! Please pick 1 for no lookahead, 2 for 1-piece, etc..."
        return
    else:
        print "Lookahead: " + str(lookahead - 1) + " pieces"
    if verbose:
        print "Verbose Printing Enabled"
    else:
        print "Verbose Printing Disabled"
    if watchGames:
        print "Game Replay Enabled"
    else:
        print "Game Replay Disabled"
    total_lines = []  # lines cleared in each trial, in order
    for i in range(ntrial):
        problem = TetrisSearchProblem(lookahead=lookahead,verbose=verbose)
        current_node = None
        # Game loop: keep playing the game until all of the pieces are done
        while current_node is None or len(current_node["pieces"]) > 0:
            game_replay, goal_node = search.aStarSearch(problem, heuristic)
            current_node = goal_node
            if watchGames:
                # Animate the replay: one grid per 0.2 s, then pause.
                for grid in game_replay:
                    print_grid(grid)
                    sleep(0.2)
                sleep(2)
            # Count cleared lines by comparing stack heights between
            # consecutive replay frames: a drop in max height means
            # that many lines vanished.
            lines_cleared = 0
            for j in range(len(game_replay)-1):
                before = max(get_height_list(game_replay[j]))
                after = max(get_height_list(game_replay[j+1]))
                if after < before:
                    lines_cleared += before - after
            print "Lines cleared: " + str(lines_cleared)
            # Persist the replay for offline inspection.
            with open('gameLogs/trial_3'+str(i)+'_linesCleared='+str(lines_cleared)+'.txt', 'w') as fout:
                for g in game_replay:
                    fout.write(str(g))
                    fout.write('\n')
            # NOTE(review): this break makes the while loop run exactly
            # once per trial — presumably intentional while the goal
            # state is stubbed (see TODO below); confirm before removing.
            break
            #return # TODO: remove once we have a real goal state
        total_lines.append(lines_cleared)
    print "Lines by Game: " + str(total_lines)
    print "Total Lines: " + str(sum(total_lines)) + " in " + str(ntrial) + " games."
开发者ID:saagar,项目名称:ai-tetris,代码行数:56,代码来源:algo.py
示例11: chooseAction
def chooseAction(self,gameState):
    """
    Choose one action for this capture-the-flag agent.

    Picks a goal by a simple state machine — keep eating food/capsules
    until roughly a third of the remaining food has been eaten, then
    (while on the opponent side, i.e. isPacman) head for a home-side
    goal; once back home, reset the eaten counters and resume eating.
    The goal is fed to an AnyFoodSearchProblem solved with A*, and the
    first planned step is returned (random legal action if the plan is
    empty).
    """
    currObs = self.getCurrentObservation()
    # True while this agent is on the opponent's side of the board.
    self.isPacman = currObs.getAgentState(self.index).isPacman
    opponents = self.getOpponents(currObs)
    # Opponent positions we can currently observe (may contain None for
    # hidden agents — TODO confirm getAgentPosition's contract).
    self.visibleAgents= []
    for x in opponents:
        self.visibleAgents += [currObs.getAgentPosition(x)]
    food = self.getFood(currObs)
    capsules = self.getCapsules(currObs)
    # Capsules are treated as extra food targets.
    foodList= food.asList(True)
    foodList+=capsules
    defendedFood = self.getFoodYouAreDefending(currObs).asList(True)
    mypos = gameState.getAgentState(self.index).getPosition()
    #check and initialise a few variables only at the start of the game
    if self.first:
        self.allFood = len(foodList)
        self.first = False
        self.width = currObs.getWalls().width
        self.height= currObs.getWalls().height
        self.isRed = currObs.isOnRedTeam(self.index)
    #goal = random.choice(food.asList(True))
    self.foodLeft = len(foodList)
    # Food eaten since the counters were last reset below.
    self.foodEaten = self.allFood - self.foodLeft
    #CHOOSE GOAL Here
    # Integer division on Python 2: eat until a third of the food is gone.
    treshHold = self.foodLeft/3
    #treshHold = 4
    if self.foodEaten <=treshHold :
        #while foodEaten is less than 5 keep eating
        goal= self.closest(foodList,mypos)
    elif self.isPacman :
        #defend and return food
        #goal = self.closest(currObs,defendedFood,mypos)
        goal = self.getClosestGoal(currObs,mypos)
    else:
        #after touching base, return to eat more food
        self.allFood-=self.foodEaten
        self.foodEaten = 0
        goal= self.closest(foodList,mypos)
    #goal = random.choice(food.asList(True))
    afsp = searchAgents.AnyFoodSearchProblem(currObs,self.index,food,goal,self.visibleAgents,opponents,self.getMazeDistance)
    self.a = search.aStarSearch(afsp, searchAgents.manhattanHeuristic)
    action = None
    if len(self.a) != 0:
        # Consume the first step of the freshly computed plan.
        action = self.a.pop(0)
    else:
        # No path found: fall back to a random legal move.
        action = random.choice(gameState.getLegalActions(self.index))
    return action
开发者ID:Musket33rs,项目名称:Project2,代码行数:49,代码来源:cookieMonsters.py
示例12: findPathToClosestDot
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.
    """
    # These lookups mirror the assignment template; only `problem` is
    # actually consumed by the search below.
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
开发者ID:gabrielfarah,项目名称:CS188-search,代码行数:15,代码来源:searchAgents.py
示例13: getAction
def getAction(self, gameState):
    """
    From game.py:
    The Agent will receive a GameState and must return an action from
    Directions.{North, South, East, West, Stop}

    NOTE(review): as originally written, this returns the whole A*
    action list rather than a single action — behavior preserved.
    """
    # Template lookups, retained from the original; only `problem` is used.
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    return search.aStarSearch(problem)
开发者ID:TingtingHuang,项目名称:CSE-511A-Introduction-to-Artificial-Intelligence,代码行数:15,代码来源:searchAgents.py
示例14: registerInitialState
def registerInitialState(self, state):
    """
    Precompute the complete action sequence before play begins.

    Greedily chains closest-dot plans (findPathToClosestDot) until no
    food remains, validating every move against the legal actions of
    the state it is applied to.

    Raises:
        Exception: if findPathToClosestDot returns an illegal move.
    """
    self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
    self.searchType = FoodSearchProblem
    self.counter = 0
    self.actions = []
    currentState = state
    while(currentState.getFood().count() > 0):
        nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
        self.actions += nextPathSegment
        for action in nextPathSegment:
            legal = currentState.getLegalActions()
            if action not in legal:
                t = (str(action), str(currentState))
                # Call-form raise: the original `raise Exception, '...'`
                # is Python-2-only syntax (SyntaxError on Python 3).
                raise Exception('findPathToClosestDot returned an illegal move: %s!\n%s' % t)
            currentState = currentState.generateSuccessor(0, action)
    self.actionIndex = 0
开发者ID:Nickiller,项目名称:pacman,代码行数:16,代码来源:searchAgents.py
示例15: mazeDistanceAStar
def mazeDistanceAStar(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built. The gameState can be any game state -- Pacman's position
    in that state is ignored.
    Example usage: mazeDistanceAStar( (2,4), (5,6), gameState)
    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    # Bug fix: the original built the first message as 'str' + tuple
    # ('point1 is a wall: ' + point1), which raises TypeError instead of
    # showing the assertion message. str() matches the point2 assert.
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    # Path length == number of steps == maze distance.
    return len(search.aStarSearch(prob, manhattanHeuristic))
开发者ID:axelys,项目名称:CS188.1x_hw1,代码行数:17,代码来源:searchAgents.py
示例16: find_tetris
def find_tetris(problem):
    """
    Continues until we find a tetris

    Repeatedly runs A* (with evaluate_state as the heuristic) on the
    given problem, printing each grid of the resulting replay with a
    one-second delay between frames.
    """
    current_node = None
    # Game loop: keep playing the game until all of the pieces are done
    while current_node is None or len(current_node["pieces"]) > 0:
        game_replay, goal_node = search.aStarSearch(problem, heuristic=evaluate_state)
        current_node = goal_node
        # Animate the replay frame by frame.
        for grid in game_replay:
            print_grid(grid)
            print
            sleep(1)
        # NOTE(review): this makes the loop run at most once while the
        # goal state is stubbed — see the author's TODO.
        return # TODO: remove once we have a real goal state
开发者ID:saagar,项目名称:ai-tetris,代码行数:17,代码来源:algo.py
示例17: registerInitialState
def registerInitialState(self, state):
    """
    This is the first time that the agent sees the layout of the game board.
    Here, we compute the path to the goal and store it for later replay.

    state: a GameState object (pacman.py)
    """
    # `is None` is the idiomatic identity test; `== None` can be fooled
    # by a custom __eq__.
    if self.searchFunction is None:
        import sys
        print("No search function provided for SearchAgent")
        sys.exit(1)
    # Time the pathfinding so slow heuristics are visible.
    starttime = time.time()
    # NOTE(review): this unconditionally replaces any searchFunction
    # checked above with A* + getFoodHeuristic — preserved as written.
    self.searchFunction=lambda x: search.aStarSearch(x, getFoodHeuristic(state))
    problem = self.searchType(state)
    self.actions = deque(self.searchFunction(problem))
    # Parenthesized print is valid on both Python 2 and 3.
    print('Path found with total cost of %d in %.1f seconds' % (problem.getCostOfActions(self.actions), time.time() - starttime))
开发者ID:samighoche,项目名称:Six-Degrees-Of-Collaboration,代码行数:19,代码来源:actualdistances.py
示例18: findPathToClosestDot
def findPathToClosestDot(self, gameState):
    """
    Returns a path (a list of actions) to the closest dot, starting from
    gameState.

    helper() selects the target dot from the food list; A* with the
    Manhattan heuristic then plans a path to it.
    """
    # The original queried position/food/walls and built the problem
    # twice with identical values; the duplicates and the unused `walls`
    # lookup are removed.
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    problem = AnyFoodSearchProblem(gameState)
    problem.goal = helper(startPosition, food.asList())
    actions = search.aStarSearch(problem, manhattanHeuristic)
    return actions
开发者ID:dyx0718,项目名称:Pac-Man,代码行数:19,代码来源:searchAgents.py
示例19: test2
def test2():
    """
    Exercise placement enumeration and A* pathfinding for every piece
    type on a fixed board layout, printing each solved board, its path,
    the elapsed time, and the total number of candidate placements.
    """
    import time
    t0 = time.time()
    total = 0  # running count of valid placements across all piece types
    for piece in PIECES:
        # Fixed test layout: a partial top row of garbage (1s) and a
        # pre-built stack of solid cells (2s) near the bottom.
        b = Board(data='0,0,0,1,1,1,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0')
        base_piece = Piece(piece, right_rotations=0)
        res = b.get_valid_positions(piece)
        total += len(res)
        for el in res:
            # Fresh copy of the same layout for each candidate placement,
            # since place_piece mutates the board.
            b = Board(data='0,0,0,1,1,1,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,0,0,0,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,0,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,0,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0;0,0,0,2,2,2,0,0,0,0')
            b.place_piece(el[0], el[1], el[2])
            # el unpacks as (piece, x, y) — TODO confirm against
            # get_valid_positions' return format.
            start_loc = (3, -1)
            problem = BoardSearchProblem(b, el[0]._type, el[0].right_rotations, (el[1], el[2]), base_piece.right_rotations, start_loc)
            path = aStarSearch(problem, boardHeuristic)
            print b
            print path
    print time.time() - t0
    print total
开发者ID:mrtong96,项目名称:aigames_blockbattle,代码行数:21,代码来源:board.py
示例20: get_path
def get_path(self, start_piece, start_loc, end_piece, end_loc):
    """
    Compute the move sequence taking start_piece at start_loc to
    end_piece's pose at end_loc, terminated by a 'drop'.

    The A* search runs backwards (from the goal pose toward the start),
    so each raw action is inverted and the sequence reversed. Trailing
    'down' moves are stripped since the final 'drop' subsumes them.
    Returns [] when the search reports 'Error'.
    """
    problem = BoardSearchProblem(
        self,
        start_piece._type,
        end_piece.right_rotations,
        end_loc,
        start_piece.right_rotations,
        start_loc,
    )
    backwards_path = aStarSearch(problem, boardHeuristic)
    if backwards_path == 'Error':
        return []
    # Map each backwards action to its forward-direction inverse.
    inverse = {'up': 'down', 'left': 'right', 'right': 'left',
               'turnleft': 'turnright', 'turnright': 'turnleft'}
    path = [inverse[step] for step in reversed(backwards_path)]
    while path and path[-1] == 'down':
        path.pop()
    path.append('drop')
    return path
开发者ID:mrtong96,项目名称:aigames_blockbattle,代码行数:23,代码来源:board.py
注:本文中的search.aStarSearch函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论