Refactor parse_args() #118

Merged (2 commits) on Feb 22, 2022
32 changes: 16 additions & 16 deletions README.md
@@ -43,7 +43,7 @@ poetry install
# `python cleanrl/ppo.py`
poetry run python cleanrl/ppo.py \
--seed 1 \
- --gym-id CartPole-v0 \
+ --env-id CartPole-v0 \
--total-timesteps 50000

# open another terminal and enter `cd cleanrl/cleanrl`
@@ -55,7 +55,7 @@ To use experiment tracking with wandb, run
wandb login # only required for the first time
poetry run python cleanrl/ppo.py \
--seed 1 \
- --gym-id CartPole-v0 \
+ --env-id CartPole-v0 \
--total-timesteps 50000 \
--track \
--wandb-project-name cleanrltest
@@ -66,37 +66,37 @@ To run training scripts in other games:
poetry shell

# classic control
- python cleanrl/dqn.py --gym-id CartPole-v1
- python cleanrl/ppo.py --gym-id CartPole-v1
- python cleanrl/c51.py --gym-id CartPole-v1
+ python cleanrl/dqn.py --env-id CartPole-v1
+ python cleanrl/ppo.py --env-id CartPole-v1
+ python cleanrl/c51.py --env-id CartPole-v1

# atari
poetry install -E atari
- python cleanrl/dqn_atari.py --gym-id BreakoutNoFrameskip-v4
- python cleanrl/c51_atari.py --gym-id BreakoutNoFrameskip-v4
- python cleanrl/ppo_atari.py --gym-id BreakoutNoFrameskip-v4
- python cleanrl/apex_dqn_atari.py --gym-id BreakoutNoFrameskip-v4
+ python cleanrl/dqn_atari.py --env-id BreakoutNoFrameskip-v4
+ python cleanrl/c51_atari.py --env-id BreakoutNoFrameskip-v4
+ python cleanrl/ppo_atari.py --env-id BreakoutNoFrameskip-v4
+ python cleanrl/apex_dqn_atari.py --env-id BreakoutNoFrameskip-v4

# NEW: 3-4x side-effect-free speedup with envpool's Atari (Linux only)
poetry install -E envpool
- python cleanrl/ppo_atari_envpool.py --gym-id BreakoutNoFrameskip-v4
+ python cleanrl/ppo_atari_envpool.py --env-id BreakoutNoFrameskip-v4
# Learn Pong-v5 in ~5-10 mins
# Side effects such as lower sample efficiency might occur
poetry run python ppo_atari_envpool.py --clip-coef=0.2 --num-envs=16 --num-minibatches=8 --num-steps=128 --update-epochs=3

# pybullet
poetry install -E pybullet
- python cleanrl/td3_continuous_action.py --gym-id MinitaurBulletDuckEnv-v0
- python cleanrl/ddpg_continuous_action.py --gym-id MinitaurBulletDuckEnv-v0
- python cleanrl/sac_continuous_action.py --gym-id MinitaurBulletDuckEnv-v0
+ python cleanrl/td3_continuous_action.py --env-id MinitaurBulletDuckEnv-v0
+ python cleanrl/ddpg_continuous_action.py --env-id MinitaurBulletDuckEnv-v0
+ python cleanrl/sac_continuous_action.py --env-id MinitaurBulletDuckEnv-v0

# procgen
poetry install -E procgen
- python cleanrl/ppo_procgen.py --gym-id starpilot
- python cleanrl/ppg_procgen.py --gym-id starpilot
+ python cleanrl/ppo_procgen.py --env-id starpilot
+ python cleanrl/ppg_procgen.py --env-id starpilot

# ppo + lstm
- python cleanrl/ppo_atari_lstm.py --gym-id BreakoutNoFrameskip-v4
+ python cleanrl/ppo_atari_lstm.py --env-id BreakoutNoFrameskip-v4
python cleanrl/ppo_memory_env_lstm.py
```

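A side note on the rename: any saved command that still passes the old flag now fails fast, because argparse rejects unrecognized options. A hypothetical snippet (not part of the PR) illustrating the failure mode:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--env-id", type=str, default="CartPole-v1")
parser.parse_args(["--gym-id", "CartPole-v1"])
# argparse prints "error: unrecognized arguments: --gym-id CartPole-v1" and exits
```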
8 changes: 4 additions & 4 deletions cleanrl/apex_dqn_atari.py
@@ -750,7 +750,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):


def act(args, experiment_name, i, q_network, target_network, lock, rollouts_queue, stats_queue, global_step, device):
- env = gym.make(args.gym_id)
+ env = gym.make(args.env_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
if args.capture_video:
@@ -920,7 +920,7 @@ def learn(
parser.add_argument(
"--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"), help="the name of this experiment"
)
parser.add_argument("--gym-id", type=str, default="BreakoutNoFrameskip-v4", help="the id of the gym environment")
parser.add_argument("--env-id", type=str, default="BreakoutNoFrameskip-v4", help="the id of the environment")
parser.add_argument("--learning-rate", type=float, default=1e-4, help="the learning rate of the optimizer")
parser.add_argument("--seed", type=int, default=2, help="seed of the experiment")
parser.add_argument("--total-timesteps", type=int, default=10000000, help="total timesteps of the experiments")
@@ -988,7 +988,7 @@ def learn(
args.seed = int(time.time())

# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
experiment_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text(
"hyperparameters", "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()]))
@@ -1009,7 +1009,7 @@ def learn(

# TRY NOT TO MODIFY: seeding
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
- env = gym.make(args.gym_id)
+ env = gym.make(args.env_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
env = wrap_deepmind(
20 changes: 10 additions & 10 deletions cleanrl/c51.py
@@ -18,14 +18,8 @@ def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
help="the name of this experiment")
parser.add_argument("--gym-id", type=str, default="CartPole-v1",
help="the id of the gym environment")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--seed", type=int, default=1,
help="seed of the experiment")
parser.add_argument("--total-timesteps", type=int, default=50000,
help="total timesteps of the experiments")
parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, `torch.backends.cudnn.deterministic=False`")
parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
@@ -40,6 +34,12 @@ def parse_args():
help="weather to capture videos of the agent performances (check out `videos` folder)")

# Algorithm specific arguments
parser.add_argument("--env-id", type=str, default="CartPole-v1",
help="the id of the environment")
parser.add_argument("--total-timesteps", type=int, default=50000,
help="total timesteps of the experiments")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--n-atoms", type=int, default=101,
help="the number of atoms")
parser.add_argument("--v-min", type=float, default=-100,
@@ -71,9 +71,9 @@ def parse_args():
return args


- def make_env(gym_id, seed, idx, capture_video, run_name):
+ def make_env(env_id, seed, idx, capture_video, run_name):
def thunk():
- env = gym.make(gym_id)
+ env = gym.make(env_id)
env = gym.wrappers.RecordEpisodeStatistics(env)
if capture_video:
if idx == 0:
@@ -122,7 +122,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):

if __name__ == "__main__":
args = parse_args()
run_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
if args.track:
import wandb

@@ -150,7 +150,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

# env setup
- envs = gym.vector.SyncVectorEnv([make_env(args.gym_id, 0, 0, args.capture_video, run_name)])
+ envs = gym.vector.SyncVectorEnv([make_env(args.env_id, 0, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

q_network = QNetwork(envs, n_atoms=args.n_atoms, v_min=args.v_min, v_max=args.v_max).to(device)
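The c51.py hunks above show the full shape of the refactor: `--gym-id` becomes `--env-id`, and the environment id, timestep budget, and learning rate move from the common block into the "Algorithm specific arguments" group. A minimal sketch of the resulting layout, using the c51.py defaults (a condensed illustration, not the complete file):

```python
import argparse
import os
from distutils.util import strtobool


def parse_args():
    parser = argparse.ArgumentParser()
    # Common arguments shared by every script: bookkeeping, seeding, hardware
    parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
        help="the name of this experiment")
    parser.add_argument("--seed", type=int, default=1,
        help="seed of the experiment")
    parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
        help="if toggled, cuda will be enabled by default")

    # Algorithm specific arguments: the three flags this PR relocates, plus
    # whatever each algorithm adds (n-atoms, v-min, v-max, ... for C51)
    parser.add_argument("--env-id", type=str, default="CartPole-v1",
        help="the id of the environment")
    parser.add_argument("--total-timesteps", type=int, default=50000,
        help="total timesteps of the experiments")
    parser.add_argument("--learning-rate", type=float, default=2.5e-4,
        help="the learning rate of the optimizer")
    return parser.parse_args()
```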
20 changes: 10 additions & 10 deletions cleanrl/c51_atari.py
@@ -25,14 +25,8 @@ def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
help="the name of this experiment")
parser.add_argument("--gym-id", type=str, default="BreakoutNoFrameskip-v4",
help="the id of the gym environment")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--seed", type=int, default=1,
help="seed of the experiment")
parser.add_argument("--total-timesteps", type=int, default=10000000,
help="total timesteps of the experiments")
parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, `torch.backends.cudnn.deterministic=False`")
parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
@@ -47,6 +41,12 @@ def parse_args():
help="weather to capture videos of the agent performances (check out `videos` folder)")

# Algorithm specific arguments
parser.add_argument("--env-id", type=str, default="BreakoutNoFrameskip-v4",
help="the id of the environment")
parser.add_argument("--total-timesteps", type=int, default=10000000,
help="total timesteps of the experiments")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--n-atoms", type=int, default=51,
help="the number of atoms")
parser.add_argument("--v-min", type=float, default=-10,
@@ -78,9 +78,9 @@ def parse_args():
return args


- def make_env(gym_id, seed, idx, capture_video, run_name):
+ def make_env(env_id, seed, idx, capture_video, run_name):
def thunk():
- env = gym.make(gym_id)
+ env = gym.make(env_id)
env = gym.wrappers.RecordEpisodeStatistics(env)
if capture_video:
if idx == 0:
@@ -143,7 +143,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):

if __name__ == "__main__":
args = parse_args()
run_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
if args.track:
import wandb

@@ -171,7 +171,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

# env setup
- envs = gym.vector.SyncVectorEnv([make_env(args.gym_id, 0, 0, args.capture_video, run_name)])
+ envs = gym.vector.SyncVectorEnv([make_env(args.env_id, 0, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

q_network = QNetwork(envs, n_atoms=args.n_atoms, v_min=args.v_min, v_max=args.v_max).to(device)
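Worth spelling out why `make_env` takes an `env_id` but returns a function: `gym.vector.SyncVectorEnv` wants a list of zero-argument callables so it can construct each environment copy itself. A runnable sketch under the pre-0.26 gym API these scripts target (wrappers trimmed to the essentials; values illustrative):

```python
import gym


def make_env(env_id, seed, idx, capture_video, run_name):
    # Return a thunk; SyncVectorEnv calls it once per environment copy.
    def thunk():
        env = gym.make(env_id)
        env = gym.wrappers.RecordEpisodeStatistics(env)  # fills info['episode']['r']
        env.seed(seed)
        return env

    return thunk


# Four CartPole copies stepped in lockstep.
envs = gym.vector.SyncVectorEnv(
    [make_env("CartPole-v1", seed=i, idx=i, capture_video=False, run_name="demo") for i in range(4)]
)
obs = envs.reset()  # batched observations, shape (4, 4) for CartPole
```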
20 changes: 10 additions & 10 deletions cleanrl/ddpg_continuous_action.py
@@ -20,14 +20,8 @@ def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
help="the name of this experiment")
parser.add_argument("--gym-id", type=str, default="HopperBulletEnv-v0",
help="the id of the gym environment")
parser.add_argument("--learning-rate", type=float, default=3e-4,
help="the learning rate of the optimizer")
parser.add_argument("--seed", type=int, default=1,
help="seed of the experiment")
parser.add_argument("--total-timesteps", type=int, default=1000000,
help="total timesteps of the experiments")
parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, `torch.backends.cudnn.deterministic=False`")
parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
@@ -42,6 +36,12 @@ def parse_args():
help="weather to capture videos of the agent performances (check out `videos` folder)")

# Algorithm specific arguments
parser.add_argument("--env-id", type=str, default="HopperBulletEnv-v0",
help="the id of the environment")
parser.add_argument("--total-timesteps", type=int, default=1000000,
help="total timesteps of the experiments")
parser.add_argument("--learning-rate", type=float, default=3e-4,
help="the learning rate of the optimizer")
parser.add_argument("--buffer-size", type=int, default=int(1e6),
help="the replay memory buffer size")
parser.add_argument("--gamma", type=float, default=0.99,
@@ -65,9 +65,9 @@ def parse_args():
return args


- def make_env(gym_id, seed, idx, capture_video, run_name):
+ def make_env(env_id, seed, idx, capture_video, run_name):
def thunk():
- env = gym.make(gym_id)
+ env = gym.make(env_id)
env = gym.wrappers.RecordEpisodeStatistics(env)
if capture_video:
if idx == 0:
@@ -111,7 +111,7 @@ def forward(self, x):

if __name__ == "__main__":
args = parse_args()
run_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
if args.track:
import wandb

@@ -139,7 +139,7 @@ def forward(self, x):
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

# env setup
- envs = gym.vector.SyncVectorEnv([make_env(args.gym_id, 0, 0, args.capture_video, run_name)])
+ envs = gym.vector.SyncVectorEnv([make_env(args.env_id, 0, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space, gym.spaces.Box), "only continuous action space is supported"

max_action = float(envs.single_action_space.high[0])
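The boolean flags left in the common block all use the same `strtobool` idiom, which is easy to misread: `nargs="?"` with `const=True` means the flag can be omitted, passed bare, or given an explicit value. A self-contained illustration (note `distutils.util.strtobool` is deprecated in newer Pythons, though it is what these scripts use):

```python
import argparse
from distutils.util import strtobool

parser = argparse.ArgumentParser()
parser.add_argument("--capture-video", type=lambda x: bool(strtobool(x)),
    default=False, nargs="?", const=True,
    help="whether to capture videos of the agent's performance")

print(parser.parse_args([]).capture_video)                          # False (default)
print(parser.parse_args(["--capture-video"]).capture_video)         # True  (const)
print(parser.parse_args(["--capture-video", "no"]).capture_video)   # False (strtobool("no") == 0)
```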
20 changes: 10 additions & 10 deletions cleanrl/dqn.py
@@ -18,14 +18,8 @@ def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
help="the name of this experiment")
parser.add_argument("--gym-id", type=str, default="CartPole-v1",
help="the id of the gym environment")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--seed", type=int, default=1,
help="seed of the experiment")
parser.add_argument("--total-timesteps", type=int, default=25000,
help="total timesteps of the experiments")
parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, `torch.backends.cudnn.deterministic=False`")
parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
@@ -40,6 +34,12 @@ def parse_args():
help="weather to capture videos of the agent performances (check out `videos` folder)")

# Algorithm specific arguments
parser.add_argument("--env-id", type=str, default="CartPole-v1",
help="the id of the environment")
parser.add_argument("--total-timesteps", type=int, default=25000,
help="total timesteps of the experiments")
parser.add_argument("--learning-rate", type=float, default=2.5e-4,
help="the learning rate of the optimizer")
parser.add_argument("--buffer-size", type=int, default=10000,
help="the replay memory buffer size")
parser.add_argument("--gamma", type=float, default=0.99,
@@ -65,9 +65,9 @@ def parse_args():
return args


- def make_env(gym_id, seed, idx, capture_video, run_name):
+ def make_env(env_id, seed, idx, capture_video, run_name):
def thunk():
- env = gym.make(gym_id)
+ env = gym.make(env_id)
env = gym.wrappers.RecordEpisodeStatistics(env)
if capture_video:
if idx == 0:
@@ -103,7 +103,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):

if __name__ == "__main__":
args = parse_args()
run_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
if args.track:
import wandb

@@ -131,7 +131,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

# env setup
- envs = gym.vector.SyncVectorEnv([make_env(args.gym_id, 0, 0, args.capture_video, run_name)])
+ envs = gym.vector.SyncVectorEnv([make_env(args.env_id, 0, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

q_network = QNetwork(envs).to(device)
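`linear_schedule` shows up only in the hunk headers here; its body is outside the diff. For context, a plausible implementation consistent with its signature and its epsilon-greedy use in dqn.py (an assumption, not code from this PR):

```python
def linear_schedule(start_e: float, end_e: float, duration: int, t: int) -> float:
    # Anneal linearly from start_e to end_e over `duration` steps, then hold.
    # The max() assumes start_e > end_e, as in epsilon decay for exploration.
    slope = (end_e - start_e) / duration
    return max(slope * t + start_e, end_e)


# e.g. epsilon halfway through a 10000-step decay from 1.0 to 0.05:
epsilon = linear_schedule(1.0, 0.05, 10000, 5000)  # 0.525
```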
20 changes: 10 additions & 10 deletions cleanrl/dqn_atari.py
@@ -25,14 +25,8 @@ def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp-name", type=str, default=os.path.basename(__file__).rstrip(".py"),
help="the name of this experiment")
parser.add_argument("--gym-id", type=str, default="BreakoutNoFrameskip-v4",
help="the id of the gym environment")
parser.add_argument("--learning-rate", type=float, default=1e-4,
help="the learning rate of the optimizer")
parser.add_argument("--seed", type=int, default=1,
help="seed of the experiment")
parser.add_argument("--total-timesteps", type=int, default=10000000,
help="total timesteps of the experiments")
parser.add_argument("--torch-deterministic", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
help="if toggled, `torch.backends.cudnn.deterministic=False`")
parser.add_argument("--cuda", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
@@ -47,6 +41,12 @@ def parse_args():
help="weather to capture videos of the agent performances (check out `videos` folder)")

# Algorithm specific arguments
parser.add_argument("--env-id", type=str, default="BreakoutNoFrameskip-v4",
help="the id of the environment")
parser.add_argument("--total-timesteps", type=int, default=10000000,
help="total timesteps of the experiments")
parser.add_argument("--learning-rate", type=float, default=1e-4,
help="the learning rate of the optimizer")
parser.add_argument("--buffer-size", type=int, default=1000000,
help="the replay memory buffer size")
parser.add_argument("--gamma", type=float, default=0.99,
@@ -72,9 +72,9 @@ def parse_args():
return args


- def make_env(gym_id, seed, idx, capture_video, run_name):
+ def make_env(env_id, seed, idx, capture_video, run_name):
def thunk():
- env = gym.make(gym_id)
+ env = gym.make(env_id)
env = gym.wrappers.RecordEpisodeStatistics(env)
if capture_video:
if idx == 0:
@@ -124,7 +124,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):

if __name__ == "__main__":
args = parse_args()
run_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
if args.track:
import wandb

@@ -152,7 +152,7 @@ def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

# env setup
- envs = gym.vector.SyncVectorEnv([make_env(args.gym_id, 0, 0, args.capture_video, run_name)])
+ envs = gym.vector.SyncVectorEnv([make_env(args.env_id, 0, 0, args.capture_video, run_name)])
assert isinstance(envs.single_action_space, gym.spaces.Discrete), "only discrete action space is supported"

q_network = QNetwork(envs).to(device)
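One consequence of the rename that the diff makes visible in every script: run names are derived from `args.env_id`, so TensorBoard directories and W&B run labels keep the same `env__exp__seed__time` shape. A sketch of the naming and hyperparameter-logging lines (`SimpleNamespace` stands in for the parsed args):

```python
import time
from types import SimpleNamespace

from torch.utils.tensorboard import SummaryWriter

args = SimpleNamespace(env_id="BreakoutNoFrameskip-v4", exp_name="dqn_atari", seed=1)
run_name = f"{args.env_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{run_name}")
# Log every hyperparameter as a markdown table under TensorBoard's Text tab.
writer.add_text(
    "hyperparameters",
    "|param|value|\n|-|-|\n%s" % ("\n".join(f"|{key}|{value}|" for key, value in vars(args).items())),
)
```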