Module pacai.agents.ghost.directional
from pacai.agents.ghost.base import GhostAgent
from pacai.core.actions import Actions
from pacai.core import distance
from pacai.util import probability

class DirectionalGhost(GhostAgent):
    """
    A ghost that prefers to rush Pacman, or flee when scared.
    """

    def __init__(self, index, prob_attack = 0.8, prob_scaredFlee = 0.8, **kwargs):
        super().__init__(index, **kwargs)

        self.prob_attack = prob_attack
        self.prob_scaredFlee = prob_scaredFlee

    def getDistribution(self, state):
        # Read variables from state.
        ghostState = state.getGhostState(self.index)
        legalActions = state.getLegalActions(self.index)
        pos = state.getGhostPosition(self.index)
        isScared = ghostState.isScared()

        speed = 1
        if (isScared):
            speed = 0.5

        actionVectors = [Actions.directionToVector(action, speed) for action in legalActions]
        newPositions = [(pos[0] + action[0], pos[1] + action[1]) for action in actionVectors]
        pacmanPosition = state.getPacmanPosition()

        # Select best actions given the state.
        distancesToPacman = [distance.manhattan(pos, pacmanPosition) for pos in newPositions]
        if (isScared):
            bestScore = max(distancesToPacman)
            bestProb = self.prob_scaredFlee
        else:
            bestScore = min(distancesToPacman)
            bestProb = self.prob_attack

        zipActions = zip(legalActions, distancesToPacman)
        bestActions = [action for action, distance in zipActions if distance == bestScore]

        # Construct distribution.
        dist = {}
        for action in bestActions:
            dist[action] = float(bestProb) / len(bestActions)

        for action in legalActions:
            if (action not in dist):
                dist[action] = 0

            dist[action] += float(1 - bestProb) / len(legalActions)

        probability.normalize(dist)

        return dist
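The distribution mixes a greedy choice with uniform noise: the best move(s) split bestProb of the probability mass, every legal move also receives an equal share of the remaining (1 - bestProb), and the result is normalized. A minimal standalone sketch of that arithmetic, with made-up actions and Manhattan distances standing in for a real game state:

    # Illustrative sketch of the mixture used by getDistribution(); the action
    # names and distances below are made up, not read from a real game state.
    legalActions = ['North', 'South', 'East', 'West']
    distancesToPacman = [3, 7, 5, 5]  # hypothetical Manhattan distances after each move

    prob_attack = 0.8
    bestScore = min(distancesToPacman)  # not scared, so chase the closest position
    bestActions = [a for (a, d) in zip(legalActions, distancesToPacman) if d == bestScore]

    dist = {}
    for action in bestActions:
        dist[action] = prob_attack / len(bestActions)

    for action in legalActions:
        dist[action] = dist.get(action, 0) + (1 - prob_attack) / len(legalActions)

    # Normalize so the values sum to 1 (the real code calls probability.normalize).
    total = sum(dist.values())
    dist = {action: value / total for action, value in dist.items()}

    print(dist)  # roughly {'North': 0.85, 'South': 0.05, 'East': 0.05, 'West': 0.05}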
Classes
class DirectionalGhost (index, prob_attack=0.8, prob_scaredFlee=0.8, **kwargs)
A ghost that prefers to rush Pacman, or flee when scared.
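A hedged construction sketch using the signature above; the ghost indices and probability values are illustrative only, not framework defaults:

    from pacai.agents.ghost.directional import DirectionalGhost

    # Hypothetical setup: higher probabilities make the ghost more single-minded.
    aggressive_ghost = DirectionalGhost(index = 1, prob_attack = 0.95, prob_scaredFlee = 0.95)
    timid_ghost = DirectionalGhost(index = 2, prob_attack = 0.5, prob_scaredFlee = 0.5)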
Ancestors
- GhostAgent
- BaseAgent
- abc.ABC
Static methods
def loadAgent(name, index, args={})
Inherited from: GhostAgent.loadAgent
Load an agent with the given class name. The name can be fully qualified or just the bare class name. If the bare name is given, the class should …
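A hedged usage sketch, assuming loadAgent looks the class up by name and passes args through to its constructor (the name, index, and args below are illustrative):

    from pacai.agents.ghost.base import GhostAgent

    # Assumption: the resolved class is constructed with the given index and args.
    ghost = GhostAgent.loadAgent('DirectionalGhost', 1, args = {'prob_attack': 0.9})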
Methods
def final(self, state)
Inherited from: GhostAgent.final
Inform the agent about the result of a game.
def getAction(self, state)
Inherited from: GhostAgent.getAction
The BaseAgent will receive an AbstractGameState, and must return an action from Directions.
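GhostAgent.getAction is expected to turn the dictionary returned by getDistribution into a single move. The following is only a rough, self-contained approximation of that sampling step, not the actual GhostAgent implementation:

    import random

    def sampleAction(dist):
        # dist maps actions to probabilities summing to 1, as produced by getDistribution().
        actions = list(dist.keys())
        weights = list(dist.values())
        return random.choices(actions, weights = weights, k = 1)[0]

    print(sampleAction({'North': 0.85, 'South': 0.05, 'East': 0.05, 'West': 0.05}))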
def getDistribution(self, state)
Inherited from: GhostAgent.getDistribution
Returns a dictionary encoding a distribution over possible actions.
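As an illustration of the returned shape for DirectionalGhost: when the ghost is scared with prob_scaredFlee = 0.8 and two of four legal moves tie for the greatest Manhattan distance from Pacman, the dictionary would look roughly like:

    # Hypothetical return value; the two tied escape moves split 0.8 of the mass,
    # and every legal move also gets an equal share of the remaining 0.2.
    {'North': 0.45, 'East': 0.45, 'South': 0.05, 'West': 0.05}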
def observationFunction(self, state)
Inherited from: GhostAgent.observationFunction
Make an observation on the state of the game. Called once for each round of the game.
def registerInitialState(self, state)
Inherited from: GhostAgent.registerInitialState
Inspect the starting state.