
Commit 94a0bcf

Fix RLlib PPO example.

1 parent: 0d7c352

2 files changed (+3, -4)

examples/e12_rllib/ppo_example.py (+1, -1)

@@ -129,7 +129,7 @@ def main(
             enable_tf1_exec_eagerly=True,
         )
         .training(
-            lr=[[0, 1e-3], [1e3, 5e-4], [1e5, 1e-4], [1e7, 5e-5], [1e8, 1e-5]],
+            lr_schedule=[[0, 1e-3], [1e3, 5e-4], [1e5, 1e-4], [1e7, 5e-5], [1e8, 1e-5]],
             train_batch_size=train_batch_size,
         )
         .multi_agent(
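
For context, a minimal sketch (not the repo's example) of where the fixed lr_schedule argument sits in an RLlib algorithm config, assuming a Ray/RLlib version whose AlgorithmConfig.training() still accepts lr_schedule as in this commit. The "CartPole-v1" environment and the batch size are placeholders; the schedule values are the ones from the diff above.

    # Hypothetical standalone sketch; only the schedule values come from the commit.
    from ray.rllib.algorithms.ppo import PPOConfig

    config = (
        PPOConfig()
        .environment("CartPole-v1")  # placeholder environment
        .training(
            # Piecewise schedule of [timestep, learning_rate] pairs.
            lr_schedule=[[0, 1e-3], [1e3, 5e-4], [1e5, 1e-4], [1e7, 5e-5], [1e8, 1e-5]],
            train_batch_size=4000,  # placeholder batch size
        )
    )

    algo = config.build()
    print(algo.train())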

examples/e12_rllib/ppo_pbt_example.py (+2, -3)

@@ -225,7 +225,7 @@ def main(
         scheduler=pbt,
         max_concurrent_trials=4,
     )
-    trainable = "PG"
+    trainable = "PPO"
     if resume_training:
         tuner = tune.Tuner.restore(
             str(experiment_dir),
@@ -246,8 +246,7 @@ def main(
 
     # Get the best checkpoint corresponding to the best result.
     best_checkpoint = best_result.checkpoint
-
-    best_logdir = Path(best_result.log_dir)
+    best_logdir = Path(best_checkpoint.path)
     model_path = best_logdir
 
     copy_tree(str(model_path), save_model_path, overwrite=True)
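
A minimal sketch (not the repo's example) of the corrected lookup in the second hunk: read the best trial's checkpoint directory from Result.checkpoint.path instead of the removed Result.log_dir. The search space, stop criterion, and metric name are placeholders; Checkpoint.path assumes Ray 2.7+ checkpoints, and on older releases RunConfig lives under ray.air rather than ray.train.

    # Hypothetical standalone sketch; only the best_checkpoint.path pattern comes from the commit.
    from pathlib import Path

    from ray import train, tune

    tuner = tune.Tuner(
        "PPO",
        param_space={"env": "CartPole-v1", "lr": tune.loguniform(1e-5, 1e-3)},
        tune_config=tune.TuneConfig(num_samples=2),
        run_config=train.RunConfig(stop={"training_iteration": 1}),
    )
    results = tuner.fit()

    best_result = results.get_best_result(metric="episode_reward_mean", mode="max")
    best_checkpoint = best_result.checkpoint
    best_logdir = Path(best_checkpoint.path)  # local checkpoint directory
    print(best_logdir)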
