robustness leaderboard
fwiebe committed Oct 10, 2023
1 parent 3eafe96 commit 83ab102
Showing 9 changed files with 171 additions and 5,016 deletions.
1,002 changes: 0 additions & 1,002 deletions leaderboard/data/ddpg/sim_swingup.csv

This file was deleted.

1,002 changes: 0 additions & 1,002 deletions leaderboard/data/dircol_tvlqr_lqr/sim_swingup.csv

This file was deleted.

1,002 changes: 0 additions & 1,002 deletions leaderboard/data/energyshaping_lqr/sim_swingup.csv

This file was deleted.

1,002 changes: 0 additions & 1,002 deletions leaderboard/data/ilqr_tvlqr_lqr/sim_swingup.csv

This file was deleted.

6 changes: 0 additions & 6 deletions leaderboard/data/leaderboard.csv

This file was deleted.

1,002 changes: 0 additions & 1,002 deletions leaderboard/data/sac/sim_swingup.csv

This file was deleted.

143 changes: 143 additions & 0 deletions leaderboard/robustness/compute_leaderboard.py
@@ -0,0 +1,143 @@
import os
import importlib
import argparse
import pandas
import numpy as np
import yaml

from compute_leaderboard_data import compute_leaderboard_data


parser = argparse.ArgumentParser()
parser.add_argument(
    "--data-dir",
    dest="data_dir",
    help="Directory for saving data. Existing data will be kept.",
    default="data",
    required=False,
)
parser.add_argument(
    "--force-recompute",
    dest="recompute",
    help="Force recomputation of the leaderboard even if there is no new data.",
    action="store_true",
    required=False,
)

parser.add_argument(
    "--link-base",
    dest="link",
    help="Base link for hosting data. Not needed for local execution.",
    default="",
    required=False,
)

args = parser.parse_args()
data_dir = args.data_dir
recompute_leaderboard = args.recompute
link_base = args.link

if not os.path.exists(data_dir):
    os.makedirs(data_dir)

# Keep only entries in the data directory that already contain benchmark results.
existing_list = [
    con
    for con in os.listdir(data_dir)
    if os.path.exists(os.path.join(data_dir, con, "benchmark.yml"))
]

for file in os.listdir("."):
    if file[:4] == "con_":
        if file[4:-3] in existing_list:
            print(f"Robustness benchmark data for {file} found.")
        else:
            print(f"Creating benchmarks for new controller {file}")
            compute_leaderboard_data(data_dir, file)
            recompute_leaderboard = True

if recompute_leaderboard:
    src_dir = "."
    save_to = os.path.join(data_dir, "leaderboard.csv")

    leaderboard_data = []

    for f in os.listdir(src_dir):
        if f[:4] == "con_":
            mod = importlib.import_module(f[:-3])
            if hasattr(mod, "leaderboard_config"):
                conf = mod.leaderboard_config
                if os.path.exists(
                    os.path.join(data_dir, conf["name"], "benchmark.yml")
                ):
                    print(
                        f"Found leaderboard_config and data for {mod.leaderboard_config['name']}"
                    )

                    with open(
                        os.path.join(data_dir, conf["name"], "benchmark.yml"),
                        "r",
                    ) as yaml_file:
                        scores = yaml.safe_load(yaml_file)

                    final_score = (
                        0.25 * scores["consistency"] / scores["iterations"]
                        + 0.25 * scores["robustness"] / scores["iterations"]
                        + 0.25 * scores["sensitivity"] / scores["iterations"]
                        + 0.25 * (10.0 - scores["min_successful_torque"]) / 10.0
                    )
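                    # final_score weights the four criteria equally (25% each):
                    # consistency, robustness, and sensitivity are averaged over the
                    # benchmark iterations, and the minimum successful torque is
                    # normalized against the 10 Nm reference used above.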

                    if link_base != "":
                        if "simple_name" in conf.keys():
                            name_with_link = f"[{conf['simple_name']}]({link_base}{conf['name']}/README.md)"
                        else:
                            name_with_link = (
                                f"[{conf['name']}]({link_base}{conf['name']}/README.md)"
                            )
                    else:
                        if "simple_name" in conf.keys():
                            name_with_link = conf["simple_name"]
                        else:
                            name_with_link = conf["name"]

                    append_data = [
                        name_with_link,
                        conf["short_description"],
                        "{:.1f}".format(
                            100 * scores["consistency"] / scores["iterations"]
                        ),
                        "{:.1f}".format(
                            100 * scores["robustness"] / scores["iterations"]
                        ),
                        "{:.1f}".format(
                            100 * scores["sensitivity"] / scores["iterations"]
                        ),
                        "{:.1f}".format(scores["min_successful_torque"]),
                        "{:.3f}".format(final_score),
                        conf["username"],
                    ]
                    if link_base != "":
                        append_data.append(
                            "[Data and Plots](" + link_base + conf["name"] + ")"
                        )

                    leaderboard_data.append(append_data)

    header = "Controller,Short Controller Description,Consistency [%],Robustness [%],Sensitivity [%],Min torque [Nm],Overall Robustness Score,Username"

    if link_base != "":
        header += ",Data"

    np.savetxt(
        save_to,
        leaderboard_data,
        header=header,
        delimiter=",",
        fmt="%s",
        comments="",
    )
    df = pandas.read_csv(save_to)
    df = df.drop(df.columns[1], axis=1)
    print(
        df.sort_values(by=["Overall Robustness Score"], ascending=False).to_markdown(
            index=False
        )
    )
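For context, the script above scans the working directory for files named con_*.py and imports a leaderboard_config dictionary from each; it reads the keys name, short_description, username, and optionally simple_name. A minimal sketch of such an entry file, with a purely hypothetical controller name and username, could look like this (the actual controller definition that compute_leaderboard_data benchmarks is omitted):

# con_my_controller.py -- hypothetical entry file, sketch only
leaderboard_config = {
    "name": "my_controller",          # folder under the data directory holding benchmark.yml
    "simple_name": "My Controller",   # optional display name on the leaderboard
    "short_description": "One-line description of the swing-up strategy.",
    "username": "github_username",    # shown in the Username column
}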
7 changes: 7 additions & 0 deletions leaderboard/robustness/compute_leaderboard_data.py
@@ -5,6 +5,8 @@

# Local imports
from simple_pendulum.analysis.benchmark import benchmarker
from simple_pendulum.utilities.plot_benchmarks import plot_benchmarks


from sim_parameters import (
mass,
@@ -64,6 +66,11 @@ def compute_leaderboard_data(data_dir, con_filename):
        save_path=os.path.join(save_dir, "benchmark.yml"),
    )

    plot_benchmarks(
        os.path.join(save_dir, "benchmark.yml"),
        os.path.join(save_dir, "scores_plot.png"),
    )

    if os.path.exists(f"readmes/{controller_name}.md"):
        os.system(f"cp readmes/{controller_name}.md {save_dir}/README.md")

21 changes: 21 additions & 0 deletions software/python/simple_pendulum/utilities/plot_benchmarks.py
@@ -0,0 +1,21 @@
import matplotlib.pyplot as plt
import yaml


def plot_benchmarks(file_path, save_to):
    with open(file_path, "r") as f:
        scores = yaml.safe_load(f)

    labels = ["Consistency", "Robustness", "Sensitivity", "Min torque Score"]
    values = []
    values.append(scores["consistency"] / scores["iterations"])
    values.append(scores["robustness"] / scores["iterations"])
    values.append(scores["sensitivity"] / scores["iterations"])
    values.append((10.0 - scores["min_successful_torque"]) / 10.0)

    fig = plt.figure(figsize=(8, 6))
    plt.bar(labels, values, color=["blue", "red", "green", "orange"])
    plt.ylabel("Scores")
    plt.xlabel("Criteria")
    plt.savefig(save_to)
    plt.close()
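A usage sketch for the new plotting utility, assuming a benchmark.yml produced by the benchmarker (the paths here are hypothetical):

from simple_pendulum.utilities.plot_benchmarks import plot_benchmarks

# Writes a bar plot of the four normalized scores to the given image path.
plot_benchmarks("data/my_controller/benchmark.yml", "data/my_controller/scores_plot.png")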
