Module pacai.agents.capture.reflex
Expand source code
import logging
import random
import time
from pacai.agents.capture.capture import CaptureAgent
from pacai.util import util
class ReflexCaptureAgent(CaptureAgent):
    """
    A base class for reflex agents that chooses score-maximizing actions.

    Subclasses customize behavior by overriding `getFeatures` and `getWeights`;
    `chooseAction` then greedily picks the action whose feature/weight
    dot product (see `evaluate`) is highest.
    """

    def __init__(self, index, **kwargs):
        super().__init__(index, **kwargs)

    def chooseAction(self, gameState):
        """
        Picks among the actions with the highest return from `ReflexCaptureAgent.evaluate`.
        Ties are broken uniformly at random.
        """

        actions = gameState.getLegalActions(self.index)

        start = time.time()
        values = [self.evaluate(gameState, a) for a in actions]

        # Lazy %-style args: the message is only formatted when DEBUG logging is enabled.
        logging.debug('evaluate() time for agent %d: %.4f', self.index, time.time() - start)

        maxValue = max(values)
        bestActions = [a for a, v in zip(actions, values) if v == maxValue]

        return random.choice(bestActions)

    def getSuccessor(self, gameState, action):
        """
        Returns the successor game state after taking `action`,
        stepping a second time if the agent would otherwise end up
        between grid positions.
        """

        successor = gameState.generateSuccessor(self.index, action)
        pos = successor.getAgentState(self.index).getPosition()

        if (pos != util.nearestPoint(pos)):
            # Only half a grid position was covered; step again to land on a grid point.
            return successor.generateSuccessor(self.index, action)
        else:
            return successor

    def evaluate(self, gameState, action):
        """
        Computes a linear combination of features and feature weights.
        """

        features = self.getFeatures(gameState, action)
        weights = self.getWeights(gameState, action)

        # Dot product over the feature keys; weights must cover every feature key.
        return sum(features[feature] * weights[feature] for feature in features)

    def getFeatures(self, gameState, action):
        """
        Returns a dict of features for the state.
        The keys match up with the return from `ReflexCaptureAgent.getWeights`.
        """

        successor = self.getSuccessor(gameState, action)

        return {
            'successorScore': self.getScore(successor),
        }

    def getWeights(self, gameState, action):
        """
        Returns a dict of weights for the state.
        The keys match up with the return from `ReflexCaptureAgent.getFeatures`.
        """

        return {
            'successorScore': 1.0,
        }
Classes
class ReflexCaptureAgent (index, **kwargs)
-
A base class for reflex agents that chooses score-maximizing actions.
Expand source code
class ReflexCaptureAgent(CaptureAgent): """ A base class for reflex agents that chooses score-maximizing actions. """ def __init__(self, index, **kwargs): super().__init__(index, **kwargs) def chooseAction(self, gameState): """ Picks among the actions with the highest return from `ReflexCaptureAgent.evaluate`. """ actions = gameState.getLegalActions(self.index) start = time.time() values = [self.evaluate(gameState, a) for a in actions] logging.debug('evaluate() time for agent %d: %.4f' % (self.index, time.time() - start)) maxValue = max(values) bestActions = [a for a, v in zip(actions, values) if v == maxValue] return random.choice(bestActions) def getSuccessor(self, gameState, action): """ Finds the next successor which is a grid position (location tuple). """ successor = gameState.generateSuccessor(self.index, action) pos = successor.getAgentState(self.index).getPosition() if (pos != util.nearestPoint(pos)): # Only half a grid position was covered. return successor.generateSuccessor(self.index, action) else: return successor def evaluate(self, gameState, action): """ Computes a linear combination of features and feature weights. """ features = self.getFeatures(gameState, action) weights = self.getWeights(gameState, action) stateEval = sum(features[feature] * weights[feature] for feature in features) return stateEval def getFeatures(self, gameState, action): """ Returns a dict of features for the state. The keys match up with the return from `ReflexCaptureAgent.getWeights`. """ successor = self.getSuccessor(gameState, action) return { 'successorScore': self.getScore(successor) } def getWeights(self, gameState, action): """ Returns a dict of weights for the state. The keys match up with the return from `ReflexCaptureAgent.getFeatures`. """ return { 'successorScore': 1.0 }
Ancestors
- CaptureAgent
- BaseAgent
- abc.ABC
Subclasses
Static methods
def loadAgent(name, index, args={})
-
Inherited from:
CaptureAgent
.loadAgent
Load an agent with the given class name. The name can be fully qualified or just the bare class name. If the bare name is given, the class should …
Methods
def chooseAction(self, gameState)
-
Picks among the actions with the highest return from
ReflexCaptureAgent.evaluate()
.Expand source code
def chooseAction(self, gameState): """ Picks among the actions with the highest return from `ReflexCaptureAgent.evaluate`. """ actions = gameState.getLegalActions(self.index) start = time.time() values = [self.evaluate(gameState, a) for a in actions] logging.debug('evaluate() time for agent %d: %.4f' % (self.index, time.time() - start)) maxValue = max(values) bestActions = [a for a, v in zip(actions, values) if v == maxValue] return random.choice(bestActions)
def evaluate(self, gameState, action)
-
Computes a linear combination of features and feature weights.
Expand source code
def evaluate(self, gameState, action): """ Computes a linear combination of features and feature weights. """ features = self.getFeatures(gameState, action) weights = self.getWeights(gameState, action) stateEval = sum(features[feature] * weights[feature] for feature in features) return stateEval
def final(self, gameState)
-
Inherited from:
CaptureAgent
.final
Inform the agent about the result of a game.
def getAction(self, gameState)
-
Inherited from:
CaptureAgent
.getAction
Calls
CaptureAgent.chooseAction
on a grid position, but continues on partial positions. If you subclass CaptureAgent
, you shouldn't need to … def getCurrentObservation(self)
-
Inherited from:
CaptureAgent
.getCurrentObservation
Returns the GameState object corresponding this agent's current observation (the observed state of the game - this may not include all of your …
def getFeatures(self, gameState, action)
-
Returns a dict of features for the state. The keys match up with the return from
ReflexCaptureAgent.getWeights()
.Expand source code
def getFeatures(self, gameState, action): """ Returns a dict of features for the state. The keys match up with the return from `ReflexCaptureAgent.getWeights`. """ successor = self.getSuccessor(gameState, action) return { 'successorScore': self.getScore(successor) }
def getFood(self, gameState)
-
Inherited from:
CaptureAgent
.getFood
Returns the food you're meant to eat. This is in the form of a
Grid
where m[x][y] = True
if there is food you can eat (based on … def getFoodYouAreDefending(self, gameState)
-
Inherited from:
CaptureAgent
.getFoodYouAreDefending
Returns the food you're meant to protect (i.e., that your opponent is supposed to eat). This is in the form of a
Grid
where `m[x][y] … def getMazeDistance(self, pos1, pos2)
-
Inherited from:
CaptureAgent
.getMazeDistance
Returns the distance between two points using the builtin distancer.
def getOpponents(self, gameState)
-
Inherited from:
CaptureAgent
.getOpponents
Returns agent indices of your opponents. This is the list of the numbers of the agents (e.g., red might be 1, 3, 5)
def getPreviousObservation(self)
-
Inherited from:
CaptureAgent
.getPreviousObservation
Returns the
AbstractGameState
object corresponding to the last state this agent saw. That is the observed state of the game … def getScore(self, gameState)
-
Inherited from:
CaptureAgent
.getScore
Returns how much you are beating the other team by in the form of a number that is the difference between your score and the opponents score. This …
def getSuccessor(self, gameState, action)
-
Finds the next successor which is a grid position (location tuple).
Expand source code
def getSuccessor(self, gameState, action): """ Finds the next successor which is a grid position (location tuple). """ successor = gameState.generateSuccessor(self.index, action) pos = successor.getAgentState(self.index).getPosition() if (pos != util.nearestPoint(pos)): # Only half a grid position was covered. return successor.generateSuccessor(self.index, action) else: return successor
def getTeam(self, gameState)
-
Inherited from:
CaptureAgent
.getTeam
Returns agent indices of your team. This is the list of the numbers of the agents (e.g., red might be the list of 1,3,5)
def getWeights(self, gameState, action)
-
Returns a dict of weights for the state. The keys match up with the return from
ReflexCaptureAgent.getFeatures()
.Expand source code
def getWeights(self, gameState, action): """ Returns a dict of weights for the state. The keys match up with the return from `ReflexCaptureAgent.getFeatures`. """ return { 'successorScore': 1.0 }
def observationFunction(self, state)
-
Inherited from:
CaptureAgent
.observationFunction
Make an observation on the state of the game. Called once for each round of the game.
def registerInitialState(self, gameState)
-
Inherited from:
CaptureAgent
.registerInitialState
This method handles the initial setup of the agent and populates useful fields, such as the team the agent is on and the …
def registerTeam(self, agentsOnTeam)
-
Inherited from:
CaptureAgent
.registerTeam
Fills the self.agentsOnTeam field with a list of the indices of the agents on your team.