Example agent teams updated
jsego committed Nov 26, 2024
1 parent f6aa277 · commit 03e04a2
Showing 3 changed files with 149 additions and 42 deletions.
6 changes: 3 additions & 3 deletions agents/team_name_1/my_team.py
@@ -23,9 +23,9 @@
import random
import util

from captureAgents import CaptureAgent
from capture_agents import CaptureAgent
from game import Directions
from util import nearestPoint
from util import nearest_point


#################
@@ -104,7 +104,7 @@ def get_successor(self, game_state, action):
"""
successor = game_state.generate_successor(self.index, action)
pos = successor.get_agent_state(self.index).get_position()
if pos != nearestPoint(pos):
if pos != nearest_point(pos):
# Only half a grid position was covered
return successor.generate_successor(self.index, action)
else:
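Note on the renamed helper: nearest_point replaces the camelCase nearestPoint but keeps the same behaviour. The sketch below is illustrative only (not part of the commit) and assumes, as in the Berkeley Pacman util module, that the helper snaps a continuous position to the closest integer grid point, which is exactly the property the half-step check in get_successor relies on.

# Illustrative sketch, not repository code: assumed rounding behaviour of nearest_point.
def nearest_point(pos):
    x, y = pos
    return (int(x + 0.5), int(y + 0.5))

# A position already on the grid equals its nearest point, so get_successor
# can return the successor state directly.
assert nearest_point((12.0, 5.0)) == (12, 5)
# A half-step position does not, so get_successor generates the successor a
# second time to finish the move onto a full grid cell.
assert nearest_point((12.5, 5.0)) != (12.5, 5.0)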
6 changes: 3 additions & 3 deletions agents/team_name_2/my_team.py
@@ -23,9 +23,9 @@
import random
import util

from captureAgents import CaptureAgent
from capture_agents import CaptureAgent
from game import Directions
from util import nearestPoint
from util import nearest_point


#################
@@ -104,7 +104,7 @@ def get_successor(self, game_state, action):
"""
successor = game_state.generate_successor(self.index, action)
pos = successor.get_agent_state(self.index).get_position()
if pos != nearestPoint(pos):
if pos != nearest_point(pos):
# Only half a grid position was covered
return successor.generate_successor(self.index, action)
else:
179 changes: 143 additions & 36 deletions agents/team_template/my_team.py
@@ -1,5 +1,5 @@
# my_team.py
# ---------
# baseline_team.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
@@ -12,16 +12,28 @@
# Pieter Abbeel ([email protected]).


# baseline_team.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

import random
from captureAgents import CaptureAgent
import util

from capture_agents import CaptureAgent
from game import Directions
from util import nearest_point


#################
# Team creation #
#################

def create_team(first_index, second_index, is_red,
first='DummyAgent', second='DummyAgent', num_training=0):
first='OffensiveReflexAgent', second='DefensiveReflexAgent', num_training=0):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
@@ -36,55 +48,150 @@ def create_team(first_index, second_index, is_red,
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""

# The following line is an example only; feel free to change it.
return [eval(first)(first_index), eval(second)(second_index)]


##########
# Agents #
##########

class DummyAgent(CaptureAgent):
class ReflexCaptureAgent(CaptureAgent):
"""
A Dummy agent to serve as an example of the necessary agent structure.
You should look at baseline_team.py for more details about how to
create an agent as this is the bare minimum.
"""
A base class for reflex agents that choose score-maximizing actions
"""

def __init__(self, index, time_for_computing=.1):
super().__init__(index, time_for_computing)
self.start = None

def register_initial_state(self, game_state):
self.start = game_state.get_agent_position(self.index)
CaptureAgent.register_initial_state(self, game_state)

def choose_action(self, game_state):
"""
This method handles the initial setup of the
agent to populate useful fields (such as what team
we're on).
Picks among the actions with the highest Q(s,a).
"""
actions = game_state.get_legal_actions(self.index)

A distanceCalculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.getDistance(p1, p2)
# You can profile your evaluation time by uncommenting these lines
# start = time.time()
values = [self.evaluate(game_state, a) for a in actions]
# print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)

IMPORTANT: This method may run for at most 15 seconds.
"""
max_value = max(values)
best_actions = [a for a, v in zip(actions, values) if v == max_value]

'''
Make sure you do not delete the following line. If you would like to
use Manhattan distances instead of maze distances in order to save
on initialization time, please take a look at
CaptureAgent.register_initial_state in capture_agents.py.
'''
CaptureAgent.register_initial_state(self, game_state)
food_left = len(self.get_food(game_state).as_list())

'''
Your initialization code goes here, if you need any.
'''
if food_left <= 2:
best_dist = 9999
best_action = None
for action in actions:
successor = self.get_successor(game_state, action)
pos2 = successor.get_agent_position(self.index)
dist = self.get_maze_distance(self.start, pos2)
if dist < best_dist:
best_action = action
best_dist = dist
return best_action

def choose_action(self, game_state):
return random.choice(best_actions)

def get_successor(self, game_state, action):
"""
Picks among actions randomly.
Finds the next successor which is a grid position (location tuple).
"""
actions = game_state.get_legal_actions(self.index)
successor = game_state.generate_successor(self.index, action)
pos = successor.get_agent_state(self.index).get_position()
if pos != nearest_point(pos):
# Only half a grid position was covered
return successor.generate_successor(self.index, action)
else:
return successor

def evaluate(self, game_state, action):
"""
Computes a linear combination of features and feature weights
"""
features = self.get_features(game_state, action)
weights = self.get_weights(game_state, action)
return features * weights

def get_features(self, game_state, action):
"""
Returns a counter of features for the state
"""
features = util.Counter()
successor = self.get_successor(game_state, action)
features['successor_score'] = self.get_score(successor)
return features

def get_weights(self, game_state, action):
"""
Normally, weights do not depend on the game state. They can be either
a counter or a dictionary.
"""
return {'successor_score': 1.0}


class OffensiveReflexAgent(ReflexCaptureAgent):
"""
A reflex agent that seeks food. This is an agent
we give you to get an idea of what an offensive agent might look like,
but it is by no means the best or only way to build an offensive agent.
"""

def get_features(self, game_state, action):
features = util.Counter()
successor = self.get_successor(game_state, action)
food_list = self.get_food(successor).as_list()
features['successor_score'] = -len(food_list) # self.get_score(successor)

# Compute distance to the nearest food

if len(food_list) > 0: # This should always be True, but better safe than sorry
my_pos = successor.get_agent_state(self.index).get_position()
min_distance = min([self.get_maze_distance(my_pos, food) for food in food_list])
features['distance_to_food'] = min_distance
return features

def get_weights(self, game_state, action):
return {'successor_score': 100, 'distance_to_food': -1}


class DefensiveReflexAgent(ReflexCaptureAgent):
"""
A reflex agent that keeps its side Pacman-free. Again,
this is to give you an idea of what a defensive agent
could be like. It is not the best or only way to make
such an agent.
"""

def get_features(self, game_state, action):
features = util.Counter()
successor = self.get_successor(game_state, action)

my_state = successor.get_agent_state(self.index)
my_pos = my_state.get_position()

# Computes whether we're on defense (1) or offense (0)
features['on_defense'] = 1
if my_state.is_pacman: features['on_defense'] = 0

# Computes distance to invaders we can see
enemies = [successor.get_agent_state(i) for i in self.get_opponents(successor)]
invaders = [a for a in enemies if a.is_pacman and a.get_position() is not None]
features['num_invaders'] = len(invaders)
if len(invaders) > 0:
dists = [self.get_maze_distance(my_pos, a.get_position()) for a in invaders]
features['invader_distance'] = min(dists)

if action == Directions.STOP: features['stop'] = 1
rev = Directions.REVERSE[game_state.get_agent_state(self.index).configuration.direction]
if action == rev: features['reverse'] = 1

'''
You should change this in your own agent.
'''
return features

return random.choice(actions)
def get_weights(self, game_state, action):
return {'num_invaders': -1000, 'on_defense': 100, 'invader_distance': -10, 'stop': -100, 'reverse': -2}
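To make the scoring concrete: evaluate() multiplies the feature counter by the weight dictionary, which amounts to a dot product over the shared feature names. The plain-dict sketch below reproduces that arithmetic outside the framework; the feature values for the two candidate actions are invented for illustration and are not taken from a real game state.

# Illustration only: plain dicts stand in for util.Counter; absent features count as 0.
def evaluate(features, weights):
    return sum(value * weights.get(name, 0) for name, value in features.items())

weights = {'successor_score': 100, 'distance_to_food': -1}
features_by_action = {
    'North': {'successor_score': -20, 'distance_to_food': 3},  # one step closer to food
    'Stop':  {'successor_score': -20, 'distance_to_food': 4},
}

scores = {a: evaluate(f, weights) for a, f in features_by_action.items()}
best_action = max(scores, key=scores.get)
# North scores -2003 and Stop scores -2004, so choose_action would pick North.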

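Finally, a note on how the new defaults are wired up: create_team resolves the class names with eval() and instantiates one agent per index. A hypothetical call (the indices and team colour below are examples; in the contest they are assigned by the game engine) would therefore look like:

# Hypothetical usage, for illustration only.
agents = create_team(first_index=0, second_index=2, is_red=True)
# -> [OffensiveReflexAgent(0), DefensiveReflexAgent(2)]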