From 126de92d788c80e970d9eb855624db478725c18e Mon Sep 17 00:00:00 2001 From: Chris Cummins Date: Tue, 30 Mar 2021 17:59:46 +0100 Subject: [PATCH 1/6] [leaderboard] Move leaderboard utility into compiler_gym namespace. This adds a compiler_gym.leaderboard module that contains the LLVM codesize leaderboard helper code. New API docs provide improved explanation of how to use it. Issue #158. --- BUILD.bazel | 1 + compiler_gym/BUILD | 2 +- compiler_gym/leaderboard/BUILD | 26 +++++ compiler_gym/leaderboard/__init__.py | 5 + .../leaderboard/llvm_codesize.py | 107 ++++++++++++++---- docs/source/compiler_gym/leaderboard.rst | 16 +++ docs/source/index.rst | 1 + leaderboard/llvm_codesize/BUILD | 25 ---- leaderboard/llvm_codesize/e_greedy/BUILD | 3 +- .../llvm_codesize/e_greedy/e_greedy.py | 9 +- .../llvm_codesize/e_greedy/e_greedy_test.py | 6 +- setup.py | 17 +-- tests/leaderboard/BUILD | 15 +++ .../leaderboard/llvm_codesize_test.py | 20 ++-- 14 files changed, 178 insertions(+), 75 deletions(-) create mode 100644 compiler_gym/leaderboard/BUILD create mode 100644 compiler_gym/leaderboard/__init__.py rename leaderboard/llvm_codesize/eval_policy.py => compiler_gym/leaderboard/llvm_codesize.py (63%) create mode 100644 docs/source/compiler_gym/leaderboard.rst delete mode 100644 leaderboard/llvm_codesize/BUILD create mode 100644 tests/leaderboard/BUILD rename leaderboard/llvm_codesize/eval_policy_test.py => tests/leaderboard/llvm_codesize_test.py (81%) diff --git a/BUILD.bazel b/BUILD.bazel index e71d72af8..411b51d32 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -22,6 +22,7 @@ py_library( "//compiler_gym/bin", "//compiler_gym/datasets", "//compiler_gym/envs", + "//compiler_gym/leaderboard", "//compiler_gym/service", "//compiler_gym/spaces", "//compiler_gym/views", diff --git a/compiler_gym/BUILD b/compiler_gym/BUILD index a0c347adf..ddbbea72f 100644 --- a/compiler_gym/BUILD +++ b/compiler_gym/BUILD @@ -22,7 +22,7 @@ py_library( py_library( name = "compiler_env_state", srcs = ["compiler_env_state.py"], - visibility = ["//compiler_gym/envs:__subpackages__"], + visibility = ["//compiler_gym:__subpackages__"], ) py_library( diff --git a/compiler_gym/leaderboard/BUILD b/compiler_gym/leaderboard/BUILD new file mode 100644 index 000000000..5df5c618c --- /dev/null +++ b/compiler_gym/leaderboard/BUILD @@ -0,0 +1,26 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +load("@rules_python//python:defs.bzl", "py_library") + +py_library( + name = "leaderboard", + srcs = ["__init__.py"], + visibility = ["//visibility:public"], + deps = [ + ":llvm_codesize", + ], +) + +py_library( + name = "llvm_codesize", + srcs = ["llvm_codesize.py"], + visibility = ["//visibility:public"], + deps = [ + "//compiler_gym:compiler_env_state", + "//compiler_gym/bin:validate", + "//compiler_gym/envs", + "//compiler_gym/util", + ], +) diff --git a/compiler_gym/leaderboard/__init__.py b/compiler_gym/leaderboard/__init__.py new file mode 100644 index 000000000..2d98773cc --- /dev/null +++ b/compiler_gym/leaderboard/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+"""This module contains helper submodules for creating CompilerGym leaderboard submissions.""" diff --git a/leaderboard/llvm_codesize/eval_policy.py b/compiler_gym/leaderboard/llvm_codesize.py similarity index 63% rename from leaderboard/llvm_codesize/eval_policy.py rename to compiler_gym/leaderboard/llvm_codesize.py index 0da696f73..7c6999ce2 100644 --- a/leaderboard/llvm_codesize/eval_policy.py +++ b/compiler_gym/leaderboard/llvm_codesize.py @@ -2,20 +2,27 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -"""This module defines a helper function for evaluating LLVM codesize reduction -policies. - -Usage: - - from compiler_gym.envs import LlvmEnv - from eval_policy import eval_policy - - class MyLlvmCodesizePolicy: - def __call__(env: LlvmEnv) -> None: - pass # ... - - if __name__ == "__main__": - eval_policy(MyLlvmCodesizePolicy()) +"""LLVM is a popular open source compiler used widely in industry and research. +This environment exposes the optimization pipeline as a set of actions that can +be applied to a particular program. The goal of the agent is to select the +sequence of optimizations that lead to the greatest reduction in instruction +count in the program being compiled. Reward is the reduction in codesize +achieved scaled to the reduction achieved by LLVM's builtin -Oz pipeline. Users +who wish to create a submission for this leaderboard should consider using the +:func:`eval_llvm_codesize_policy() +` helper. + ++--------------------+------------------------------------------------------+ +| Property | Value | ++====================+======================================================+ +| Environment | :class:`LlvmEnv `. | ++--------------------+------------------------------------------------------+ +| Observation Space | Any. | ++--------------------+------------------------------------------------------+ +| Reward Space | Instruction count reduction relative to :code:`-Oz`. | ++--------------------+------------------------------------------------------+ +| Test Dataset | The 23 cBench benchmarks. | ++--------------------+------------------------------------------------------+ """ import platform import sys @@ -33,9 +40,9 @@ def __call__(env: LlvmEnv) -> None: from cpuinfo import get_cpu_info from tqdm import tqdm -import compiler_gym # noqa Register environments. -from compiler_gym import CompilerEnvState +import compiler_gym.envs # noqa Register environments. from compiler_gym.bin.validate import main as validate +from compiler_gym.compiler_env_state import CompilerEnvState from compiler_gym.envs import LlvmEnv from compiler_gym.util.tabulate import tabulate from compiler_gym.util.timer import Timer @@ -163,11 +170,71 @@ def run(self): self.n += 1 -def eval_policy(policy: Policy) -> None: - """Evaluate a policy on a target dataset. +def eval_llvm_codesize_policy(policy: Policy) -> None: + """Evaluate an LLVM codesize policy and generate results for a leaderboard + submission. + + To use it, you define your policy as a function that takes an + :class:`LlvmEnv ` instance as input and modifies + it in place. For example, for a trivial random policy: + + >>> from compiler_gym.envs import LlvmEnv + >>> def my_policy(env: LlvmEnv) -> None: + .... # Defines a policy that takes 10 random steps. + ... for _ in range(10): + ... _, _, done, _ = env.step(env.action_space.sample()) + ... 
if done: break + + If you would like a stateful policy, you could use a class and override the + :code:`__call__()` method: + + >>> class MyPolicy: + ... def __call__(env: LlvmEnv) -> None: + ... pass # ... do fun stuff! + >>> my_policy = MyPolicy() + + You then call the :func:`eval_llvm_codesize_policy() + ` helper + function, passing it your policy as its only argument: + + >>> eval_llvm_codesize_policy(my_policy) + + Put together as a complete example this is what an example leaderboard + submission script looks like: + + .. code-block:: python + + # my_policy.py + from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy + from compiler_gym.envs import LlvmEnv + + def my_policy(env: LlvmEnv) -> None: + pass # ... do fun stuff! + + if __name__ == "__main__": + eval_llvm_codesize_policy(my_policy) + + The :func:`eval_llvm_codesize_policy() + ` helper + defines a number of commandline flags that can be overriden to control the + behavior of the evaluation. For example the flag :code:`--n` determines the + number of times the policy is run on each benchmark (default is 10), and + :code:`--logfile` determines the path of the generated results file: + + .. code-block:: + + $ python my_policy.py --n=5 --logfile=my_policy_results.csv + + You can use :code:`--helpfull` flag to list all of the flags that are + defined: + + .. code-block:: + + $ python my_policy.py --helpfull - A policy is a function that takes as input an LlvmEnv environment and - performs a set of actions on it. + Once you are happy with your approach, see the `contributing guide + `_ + for instructions on preparing a submission to the leaderboard. """ def main(argv): diff --git a/docs/source/compiler_gym/leaderboard.rst b/docs/source/compiler_gym/leaderboard.rst new file mode 100644 index 000000000..979a7ed20 --- /dev/null +++ b/docs/source/compiler_gym/leaderboard.rst @@ -0,0 +1,16 @@ +compiler_gym.leaderboard +======================== + +We provide `leaderboards +`_ to track the +performance of user-submitted algorithms on CompilerGym tasks. The goal of the +leaderboards is to provide a venue for researchers to promote their work, and to +provide a common benchmark for evaluating approaches. The +:code:`compiler_gym.leaderboard` module contains helper submodules that can be +used for preparing leaderboard submissions. + +LLVM Codesize +------------- + +.. automodule:: compiler_gym.leaderboard.llvm_codesize + :members: diff --git a/docs/source/index.rst b/docs/source/index.rst index 38cfb62d6..0e554e6f0 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -31,6 +31,7 @@ for applying reinforcement learning to compiler optimizations. compiler_gym/datasets compiler_gym/envs llvm/api + compiler_gym/leaderboard compiler_gym/service compiler_gym/spaces compiler_gym/views diff --git a/leaderboard/llvm_codesize/BUILD b/leaderboard/llvm_codesize/BUILD deleted file mode 100644 index 99973145b..000000000 --- a/leaderboard/llvm_codesize/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -py_library( - name = "eval_policy", - srcs = ["eval_policy.py"], - visibility = ["//leaderboard/llvm_codesize:__subpackages__"], - deps = [ - "//compiler_gym", - "//compiler_gym/bin:validate", - ], -) - -py_test( - name = "eval_policy_test", - timeout = "short", - srcs = ["eval_policy_test.py"], - deps = [ - ":eval_policy", - "//tests:test_main", - "//tests/pytest_plugins:common", - ], -) diff --git a/leaderboard/llvm_codesize/e_greedy/BUILD b/leaderboard/llvm_codesize/e_greedy/BUILD index a11a1a9a3..7d5257445 100644 --- a/leaderboard/llvm_codesize/e_greedy/BUILD +++ b/leaderboard/llvm_codesize/e_greedy/BUILD @@ -8,7 +8,7 @@ py_library( name = "e_greedy", srcs = ["e_greedy.py"], deps = [ - "//leaderboard/llvm_codesize:eval_policy", + "//compiler_gym/leaderboard:llvm_codesize", ], ) @@ -18,6 +18,7 @@ py_test( srcs = ["e_greedy_test.py"], deps = [ ":e_greedy", + "//compiler_gym/leaderboard:llvm_codesize", "//tests:test_main", "//tests/pytest_plugins:llvm", ], diff --git a/leaderboard/llvm_codesize/e_greedy/e_greedy.py b/leaderboard/llvm_codesize/e_greedy/e_greedy.py index 9aaf28109..bb7570a59 100644 --- a/leaderboard/llvm_codesize/e_greedy/e_greedy.py +++ b/leaderboard/llvm_codesize/e_greedy/e_greedy.py @@ -4,19 +4,14 @@ # LICENSE file in the root directory of this source tree. """ϵ-greedy policy for LLVM codesize.""" import logging -import os import random -import sys from concurrent.futures import ThreadPoolExecutor, as_completed from typing import NamedTuple from absl import flags from compiler_gym.envs import CompilerEnv, LlvmEnv - -# Import the ../eval_policy.py helper. -sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + "/..") -from eval_policy import eval_policy # noqa pylint: disable=wrong-import-position +from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy flags.DEFINE_float( "epsilon", 0, "The ratio of patience to the size of the action space. " @@ -113,4 +108,4 @@ def e_greedy_search(env: LlvmEnv) -> None: if __name__ == "__main__": - eval_policy(e_greedy_search) + eval_llvm_codesize_policy(e_greedy_search) diff --git a/leaderboard/llvm_codesize/e_greedy/e_greedy_test.py b/leaderboard/llvm_codesize/e_greedy/e_greedy_test.py index 90b3be344..3b771baa5 100644 --- a/leaderboard/llvm_codesize/e_greedy/e_greedy_test.py +++ b/leaderboard/llvm_codesize/e_greedy/e_greedy_test.py @@ -2,16 +2,16 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-"""Tests for //leaderboard/llvm_codesize:eval_policy.""" +"""Tests for //leaderboard/llvm_codesize/e_greedy.""" from concurrent.futures import ThreadPoolExecutor import pytest from absl import flags from compiler_gym.envs import LlvmEnv +from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy from leaderboard.llvm_codesize.e_greedy.e_greedy import ( e_greedy_search, - eval_policy, select_best_action, ) from tests.test_main import main as _test_main @@ -33,7 +33,7 @@ def test_random_search(): ] ) with pytest.raises(SystemExit): - eval_policy(e_greedy_search) + eval_llvm_codesize_policy(e_greedy_search) def test_select_best_action_closed_environment(env: LlvmEnv): diff --git a/setup.py b/setup.py index 657337f03..d9d7402d0 100644 --- a/setup.py +++ b/setup.py @@ -48,23 +48,24 @@ def get_tag(self): url="https://github.com/facebookresearch/CompilerGym", license="MIT", packages=[ - "compiler_gym", "compiler_gym.bin", "compiler_gym.datasets", - "compiler_gym.envs", - "compiler_gym.envs.llvm", - "compiler_gym.envs.llvm.service", "compiler_gym.envs.llvm.service.passes", - "compiler_gym.service", + "compiler_gym.envs.llvm.service", + "compiler_gym.envs.llvm", + "compiler_gym.envs", + "compiler_gym.leaderboard", "compiler_gym.service.proto", + "compiler_gym.service", "compiler_gym.spaces", - "compiler_gym.third_party", "compiler_gym.third_party.autophase", - "compiler_gym.third_party.llvm", "compiler_gym.third_party.inst2vec", - "compiler_gym.util", + "compiler_gym.third_party.llvm", + "compiler_gym.third_party", "compiler_gym.util.flags", + "compiler_gym.util", "compiler_gym.views", + "compiler_gym", ], package_dir={ "": "bazel-bin/package.runfiles/CompilerGym", diff --git a/tests/leaderboard/BUILD b/tests/leaderboard/BUILD new file mode 100644 index 000000000..9f0fdff0f --- /dev/null +++ b/tests/leaderboard/BUILD @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +load("@rules_python//python:defs.bzl", "py_test") + +py_test( + name = "llvm_codesize_test", + srcs = ["llvm_codesize_test.py"], + deps = [ + "//compiler_gym/leaderboard:llvm_codesize", + "//tests:test_main", + "//tests/pytest_plugins:common", + ], +) diff --git a/leaderboard/llvm_codesize/eval_policy_test.py b/tests/leaderboard/llvm_codesize_test.py similarity index 81% rename from leaderboard/llvm_codesize/eval_policy_test.py rename to tests/leaderboard/llvm_codesize_test.py index d9744398e..c7cd39fc3 100644 --- a/leaderboard/llvm_codesize/eval_policy_test.py +++ b/tests/leaderboard/llvm_codesize_test.py @@ -2,13 +2,13 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-"""Tests for //leaderboard/llvm_codesize:eval_policy.""" +"""Tests for //compiler_gym/leaderboard:llvm_codesize.""" from pathlib import Path import pytest from absl import flags -from leaderboard.llvm_codesize.eval_policy import eval_policy +from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy from tests.test_main import main FLAGS = flags.FLAGS @@ -21,14 +21,14 @@ def null_policy(env) -> None: pass -def test_eval_policy(): +def test_eval_llvm_codesize_policy(): FLAGS.unparse_flags() FLAGS(["argv0", "--n=1", "--max_benchmarks=1", "--novalidate"]) with pytest.raises(SystemExit): - eval_policy(null_policy) + eval_llvm_codesize_policy(null_policy) -def test_eval_policy_resume(tmpwd): +def test_eval_llvm_codesize_policy_resume(tmpwd): FLAGS.unparse_flags() # Run eval on a single benchmark. @@ -43,7 +43,7 @@ def test_eval_policy_resume(tmpwd): ] ) with pytest.raises(SystemExit): - eval_policy(null_policy) + eval_llvm_codesize_policy(null_policy) # Check that the log has a single entry (and a header row.) assert Path("test.log").is_file() @@ -64,7 +64,7 @@ def test_eval_policy_resume(tmpwd): ] ) with pytest.raises(SystemExit): - eval_policy(null_policy) + eval_llvm_codesize_policy(null_policy) # Check that the log extends the original. assert Path("test.log").is_file() @@ -86,7 +86,7 @@ def test_eval_policy_resume(tmpwd): ] ) with pytest.raises(SystemExit): - eval_policy(null_policy) + eval_llvm_codesize_policy(null_policy) # Check that the log extends the original. assert Path("test.log").is_file() @@ -96,11 +96,11 @@ def test_eval_policy_resume(tmpwd): assert len(log.rstrip().split("\n")) == 5 -def test_eval_policy_invalid_flag(): +def test_eval_llvm_codesize_policy_invalid_flag(): FLAGS.unparse_flags() FLAGS(["argv0", "--n=-1"]) with pytest.raises(AssertionError): - eval_policy(null_policy) + eval_llvm_codesize_policy(null_policy) if __name__ == "__main__": From 6a0fb6953c2f63077304c6be3bec5b4073b787fb Mon Sep 17 00:00:00 2001 From: Chris Cummins Date: Tue, 30 Mar 2021 23:39:09 +0100 Subject: [PATCH 2/6] [leaderboard] Rename --logfile to --results_logfile. This is to break the duplicate flag error from //tests/benchmarks:parallelization_load_test. --- compiler_gym/leaderboard/llvm_codesize.py | 23 ++++++++++---------- leaderboard/llvm_codesize/e_greedy/README.md | 2 +- tests/leaderboard/llvm_codesize_test.py | 18 +++++++-------- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/compiler_gym/leaderboard/llvm_codesize.py b/compiler_gym/leaderboard/llvm_codesize.py index 7c6999ce2..28b697fce 100644 --- a/compiler_gym/leaderboard/llvm_codesize.py +++ b/compiler_gym/leaderboard/llvm_codesize.py @@ -48,7 +48,7 @@ from compiler_gym.util.timer import Timer flags.DEFINE_string( - "logfile", "results.csv", "The path of the file to write results to." + "results_logfile", "results.csv", "The path of the file to write results to." 
) flags.DEFINE_string( "hardware_info", @@ -70,8 +70,8 @@ flags.DEFINE_boolean( "resume", False, - "If true, read the --logfile first and run only the policy evaluations not " - "already in the logfile.", + "If true, read the --results_logfile first and run only the policy " + "evaluations not already in the logfile.", ) FLAGS = flags.FLAGS @@ -139,7 +139,7 @@ def __init__(self, env, benchmarks, policy, print_header): self.n = 0 def run(self): - with open(FLAGS.logfile, "a") as logfile: + with open(FLAGS.results_logfile, "a") as logfile: for benchmark in self.benchmarks: self.env.reset(benchmark=benchmark) with Timer() as timer: @@ -219,11 +219,12 @@ def my_policy(env: LlvmEnv) -> None: defines a number of commandline flags that can be overriden to control the behavior of the evaluation. For example the flag :code:`--n` determines the number of times the policy is run on each benchmark (default is 10), and - :code:`--logfile` determines the path of the generated results file: + :code:`--results_logfile` determines the path of the generated results + file: .. code-block:: - $ python my_policy.py --n=5 --logfile=my_policy_results.csv + $ python my_policy.py --n=5 --results_logfile=my_policy_results.csv You can use :code:`--helpfull` flag to list all of the flags that are defined: @@ -242,7 +243,7 @@ def main(argv): assert FLAGS.n > 0, "n must be > 0" print( - f"Writing inference results to '{FLAGS.logfile}' and " + f"Writing inference results to '{FLAGS.results_logfile}' and " f"hardware summary to '{FLAGS.hardware_info}'" ) @@ -268,16 +269,16 @@ def main(argv): # of benchmarks to evaluate. print_header = True init = 0 - if Path(FLAGS.logfile).is_file(): + if Path(FLAGS.results_logfile).is_file(): if FLAGS.resume: - with open(FLAGS.logfile, "r") as f: + with open(FLAGS.results_logfile, "r") as f: for state in CompilerEnvState.read_csv_file(f): if state.benchmark in benchmarks: init += 1 benchmarks.remove(state.benchmark) print_header = False else: - Path(FLAGS.logfile).unlink() + Path(FLAGS.results_logfile).unlink() # Run the benchmark loop in background so that we can asynchronously # log progress. @@ -293,6 +294,6 @@ def main(argv): if FLAGS.validate: FLAGS.env = "llvm-ic-v0" - validate(["argv0", FLAGS.logfile]) + validate(["argv0", FLAGS.results_logfile]) app.run(main) diff --git a/leaderboard/llvm_codesize/e_greedy/README.md b/leaderboard/llvm_codesize/e_greedy/README.md index 5ddc84ff8..537dfc02e 100644 --- a/leaderboard/llvm_codesize/e_greedy/README.md +++ b/leaderboard/llvm_codesize/e_greedy/README.md @@ -52,5 +52,5 @@ terminates when the maximum reward attainable by any action is <= 0. ### Experimental Methodology ```sh -$ python e_greedy.py --n=1 --epsilon=0 --logfile=results_e0.csv +$ python e_greedy.py --n=1 --epsilon=0 --results_logfile=results_e0.csv ``` diff --git a/tests/leaderboard/llvm_codesize_test.py b/tests/leaderboard/llvm_codesize_test.py index c7cd39fc3..4b3759f35 100644 --- a/tests/leaderboard/llvm_codesize_test.py +++ b/tests/leaderboard/llvm_codesize_test.py @@ -39,15 +39,15 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): "--max_benchmarks=1", "--novalidate", "--resume", - "--logfile=test.log", + "--results_logfile=test.csv", ] ) with pytest.raises(SystemExit): eval_llvm_codesize_policy(null_policy) # Check that the log has a single entry (and a header row.) 
- assert Path("test.log").is_file() - with open("test.log") as f: + assert Path("test.csv").is_file() + with open("test.csv") as f: log = f.read() assert len(log.rstrip().split("\n")) == 2 init_logfile = log @@ -60,15 +60,15 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): "--max_benchmarks=2", "--novalidate", "--resume", - "--logfile=test.log", + "--results_logfile=test.csv", ] ) with pytest.raises(SystemExit): eval_llvm_codesize_policy(null_policy) # Check that the log extends the original. - assert Path("test.log").is_file() - with open("test.log") as f: + assert Path("test.csv").is_file() + with open("test.csv") as f: log = f.read() assert log.startswith(init_logfile) assert len(log.rstrip().split("\n")) == 3 @@ -82,15 +82,15 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): "--max_benchmarks=2", "--novalidate", "--resume", - "--logfile=test.log", + "--results_logfile=test.csv", ] ) with pytest.raises(SystemExit): eval_llvm_codesize_policy(null_policy) # Check that the log extends the original. - assert Path("test.log").is_file() - with open("test.log") as f: + assert Path("test.csv").is_file() + with open("test.csv") as f: log = f.read() assert log.startswith(init_logfile) assert len(log.rstrip().split("\n")) == 5 From 339e118051953102a716ff7a154debc8c52ac953 Mon Sep 17 00:00:00 2001 From: Chris Cummins Date: Tue, 30 Mar 2021 22:10:12 +0100 Subject: [PATCH 3/6] [leaderboard] Make it clear that users can set observation spaces. Issue #142. --- compiler_gym/leaderboard/llvm_codesize.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/compiler_gym/leaderboard/llvm_codesize.py b/compiler_gym/leaderboard/llvm_codesize.py index 28b697fce..4e6506d6c 100644 --- a/compiler_gym/leaderboard/llvm_codesize.py +++ b/compiler_gym/leaderboard/llvm_codesize.py @@ -193,7 +193,18 @@ def eval_llvm_codesize_policy(policy: Policy) -> None: ... pass # ... do fun stuff! >>> my_policy = MyPolicy() - You then call the :func:`eval_llvm_codesize_policy() + The role of your policy to perform actions on the supplied environment that + it thinks provides the best possible cumulative reward. The policy may set + the observation space as it likes: + + >>> env.observation_space = "InstCount" # Set a new space for env.step() + >>> env.observation["InstCount"] # Calculate a one-off observation. + + However, the policy may not change the reward space of the environment, or + the benchmark. + + Once you have defined your policy, call the + :func:`eval_llvm_codesize_policy() ` helper function, passing it your policy as its only argument: @@ -219,8 +230,7 @@ def my_policy(env: LlvmEnv) -> None: defines a number of commandline flags that can be overriden to control the behavior of the evaluation. For example the flag :code:`--n` determines the number of times the policy is run on each benchmark (default is 10), and - :code:`--results_logfile` determines the path of the generated results - file: + :code:`--results_logfile` determines the path of the generated results file: .. code-block:: From 7b79be5a877c9836f2343080c98fe87ffd84ef5e Mon Sep 17 00:00:00 2001 From: Chris Cummins Date: Wed, 31 Mar 2021 12:38:42 +0100 Subject: [PATCH 4/6] [CONTRIBUTING] Improve leaderboard submission instructions. Re-order the file so that leaderboard submissions appear directly below pull requests. Then provide more details about the submission review process. 
--- CONTRIBUTING.md | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 557286040..1d714e38c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,6 +37,26 @@ We actively welcome your pull requests. ("CLA"). +## Leaderboard Submissions + +To add a new result to the leaderboard, add a new entry to the leaderboard table +and file a [Pull Request](#pull-requests). Please include: + +1. A list of all authors. +2. A CSV file of your results. The + [compiler_gym.leaderboard](https://facebookresearch.github.io/CompilerGym/compiler_gym/leaderboard.html) + package provides utilities to help generate results using your agent. +3. A write-up of your approach. You may use the + [submission template](/leaderboard/SUBMISSION_TEMPLATE.md) as a guide. + +We do not require that you submit the source code for your approach. Once you +submit your pull request we will validate your results CSV files and may ask +clarifying questions if we feel that those would be useful to improve +reproducibility. Please [take a look +here](https://github.com/facebookresearch/CompilerGym/pull/127) for an example +of a well-formed pull request submission. + + ## Code Style We want to ease the burden of code formatting using tools. Our code style @@ -62,20 +82,6 @@ Other common sense rules we encourage are: easy-to-write. -## Leaderboard Submissions - -To add a new result to the leaderboard, add a new entry to the leaderboard table -and file a [Pull Request](#pull-requests). Please include: - -1. A list of all authors. -2. A CSV file of your results. -3. A write-up of your approach. You may use the - [submission template](/leaderboard/SUBMISSION_TEMPLATE.md) as a guide. - -Please [take a look -here](https://github.com/facebookresearch/CompilerGym/pull/127) for an example -of a well-formed pull request submission. - ## Contributor License Agreement ("CLA") In order to accept your pull request, we need you to submit a CLA. You From 0acd2fe93569867404cab8733ecb9bdace7d6ba4 Mon Sep 17 00:00:00 2001 From: Chris Cummins Date: Wed, 31 Mar 2021 12:59:34 +0100 Subject: [PATCH 5/6] [leaderboard] Rename LLVM codesize to instruction count. Be clear that this leaderboard evaluates performance at reducing the instruction count of LLVM-IR, not the binary codesize. 
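For illustration only (not part of the change itself): the two metrics can be
inspected directly on an environment instance. The snippet below is a rough
sketch; the observation space names and the benchmark URI are assumed from the
CompilerGym documentation rather than taken from this patch.

    import gym

    import compiler_gym  # noqa: registers the llvm-ic-v0 environment

    env = gym.make("llvm-ic-v0")
    env.reset(benchmark="cBench-v1/qsort")  # assumed benchmark URI

    # The quantity this leaderboard rewards: LLVM-IR instruction count.
    print(env.observation["IrInstructionCount"])

    # A different quantity: the size in bytes of the compiled object's .text
    # section, i.e. binary codesize. Not what llvm-ic-v0 rewards.
    print(env.observation["ObjectTextSizeBytes"])

    env.close()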
--- README.md | 25 ++++----- compiler_gym/leaderboard/BUILD | 6 +-- compiler_gym/leaderboard/__init__.py | 14 ++++- .../{llvm_codesize.py => llvm_instcount.py} | 54 +++++++++++-------- docs/source/compiler_gym/leaderboard.rst | 14 ++--- .../e_greedy/BUILD | 4 +- .../e_greedy/README.md | 0 .../e_greedy/e_greedy.py | 4 +- .../e_greedy/e_greedy_test.py | 8 +-- .../e_greedy/results_e0.csv | 0 .../e_greedy/results_e10.csv | 0 tests/leaderboard/BUILD | 6 +-- ...odesize_test.py => llvm_instcount_test.py} | 20 +++---- 13 files changed, 85 insertions(+), 70 deletions(-) rename compiler_gym/leaderboard/{llvm_codesize.py => llvm_instcount.py} (84%) rename leaderboard/{llvm_codesize => llvm_instcount}/e_greedy/BUILD (83%) rename leaderboard/{llvm_codesize => llvm_instcount}/e_greedy/README.md (100%) rename leaderboard/{llvm_codesize => llvm_instcount}/e_greedy/e_greedy.py (97%) rename leaderboard/{llvm_codesize => llvm_instcount}/e_greedy/e_greedy_test.py (83%) rename leaderboard/{llvm_codesize => llvm_instcount}/e_greedy/results_e0.csv (100%) rename leaderboard/{llvm_codesize => llvm_instcount}/e_greedy/results_e10.csv (100%) rename tests/leaderboard/{llvm_codesize_test.py => llvm_instcount_test.py} (81%) diff --git a/README.md b/README.md index 220a483ab..c299c5f53 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,11 @@ developers to expose new optimization problems for AI. - [Installation](#installation) - [Trying it out](#trying-it-out) - [Leaderboards](#leaderboards) - - [llvm-ic-v0](#llvm-ic-v0) + - [LLVM Instruction Count](#llvm-instruction-count) - [Contributing](#contributing) - [Citation](#citation) + + # Getting Started Starting with CompilerGym is simple. If you not already familiar with the gym @@ -158,24 +160,23 @@ CompilerGym tasks. To submit a result please see [this document](https://github.com/facebookresearch/CompilerGym/blob/development/CONTRIBUTING.md#leaderboard-submissions). -## llvm-ic-v0 - -LLVM is a popular open source compiler used widely in industry and research. -This environment exposes the optimization pipeline as a set of actions that can -be applied to a particular program. The goal of the agent is to select the -sequence of optimizations that lead to the greatest reduction in instruction -count in the program being compiled. Reward is the reduction in codesize -achieved scaled to the reduction achieved by LLVM's builtin `-Oz` pipeline. +## LLVM Instruction Count -### cBench-v1 +LLVM is a popular open source compiler used widely in industry and research. The +`llvm-ic-v0` environment exposes LLVM's optimizing passes as a set of actions +that can be applied to a particular program. The goal of the agent is to select +the sequence of optimizations that lead to the greatest reduction in instruction +count in the program being compiled. Reward is the reduction in instruction +count achieved scaled to the reduction achieved by LLVM's builtin `-Oz` +pipeline. This leaderboard tracks the results achieved by algorithms on the `llvm-ic-v0` environment on the 23 benchmarks in the `cBench-v1` dataset. 
| Author | Algorithm | Links | Date | Walltime (mean) | Codesize Reduction (geomean) | | --- | --- | --- | --- | --- | --- | -| Facebook | Greedy search | [write-up](leaderboard/llvm_codesize/e_greedy/README.md), [results](leaderboard/llvm_codesize/e_greedy/results_e0.csv) | 2021-03 | 169.237s | 1.055× | -| Facebook | e-Greedy search (e=0.1) | [write-up](leaderboard/llvm_codesize/e_greedy/README.md), [results](leaderboard/llvm_codesize/e_greedy/results_e10.csv) | 2021-03 | 152.579s | 1.041× | +| Facebook | Greedy search | [write-up](leaderboard/llvm_instcount/e_greedy/README.md), [results](leaderboard/llvm_instcount/e_greedy/results_e0.csv) | 2021-03 | 169.237s | 1.055× | +| Facebook | e-Greedy search (e=0.1) | [write-up](leaderboard/llvm_instcount/e_greedy/README.md), [results](leaderboard/llvm_instcount/e_greedy/results_e10.csv) | 2021-03 | 152.579s | 1.041× | # Contributing diff --git a/compiler_gym/leaderboard/BUILD b/compiler_gym/leaderboard/BUILD index 5df5c618c..319c52661 100644 --- a/compiler_gym/leaderboard/BUILD +++ b/compiler_gym/leaderboard/BUILD @@ -9,13 +9,13 @@ py_library( srcs = ["__init__.py"], visibility = ["//visibility:public"], deps = [ - ":llvm_codesize", + ":llvm_instcount", ], ) py_library( - name = "llvm_codesize", - srcs = ["llvm_codesize.py"], + name = "llvm_instcount", + srcs = ["llvm_instcount.py"], visibility = ["//visibility:public"], deps = [ "//compiler_gym:compiler_env_state", diff --git a/compiler_gym/leaderboard/__init__.py b/compiler_gym/leaderboard/__init__.py index 2d98773cc..d69ba3a40 100644 --- a/compiler_gym/leaderboard/__init__.py +++ b/compiler_gym/leaderboard/__init__.py @@ -2,4 +2,16 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -"""This module contains helper submodules for creating CompilerGym leaderboard submissions.""" +"""This package contains modules that can be used for preparing leaderboard +submissions. + +We provide `leaderboards +`_ to track the +performance of user-submitted algorithms on compiler optimization tasks. The +goal of the leaderboards is to provide a venue for researchers to promote their +work, and to provide a common framework for evaluating and comparing different +approaches. We accept submissions to the leaderboards through pull requests, see +`here +`_ +for instructions. +""" diff --git a/compiler_gym/leaderboard/llvm_codesize.py b/compiler_gym/leaderboard/llvm_instcount.py similarity index 84% rename from compiler_gym/leaderboard/llvm_codesize.py rename to compiler_gym/leaderboard/llvm_instcount.py index 4e6506d6c..a8dad1066 100644 --- a/compiler_gym/leaderboard/llvm_codesize.py +++ b/compiler_gym/leaderboard/llvm_instcount.py @@ -3,14 +3,12 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """LLVM is a popular open source compiler used widely in industry and research. -This environment exposes the optimization pipeline as a set of actions that can -be applied to a particular program. The goal of the agent is to select the -sequence of optimizations that lead to the greatest reduction in instruction -count in the program being compiled. Reward is the reduction in codesize -achieved scaled to the reduction achieved by LLVM's builtin -Oz pipeline. Users -who wish to create a submission for this leaderboard should consider using the -:func:`eval_llvm_codesize_policy() -` helper. 
+The :code:`llvm-ic-v0` environment exposes LLVM's optimizing passes as a set of +actions that can be applied to a particular program. The goal of the agent is to +select the sequence of optimizations that lead to the greatest reduction in +instruction count in the program being compiled. Reward is the reduction in +instruction count achieved scaled to the reduction achieved by LLVM's builtin +:code:`-Oz` pipeline. +--------------------+------------------------------------------------------+ | Property | Value | @@ -23,6 +21,11 @@ +--------------------+------------------------------------------------------+ | Test Dataset | The 23 cBench benchmarks. | +--------------------+------------------------------------------------------+ + +Users who wish to create a submission for this leaderboard may use +:func:`eval_llvm_instcount_policy() +` to +automatically evaluate their agent on the test set. """ import platform import sys @@ -170,7 +173,7 @@ def run(self): self.n += 1 -def eval_llvm_codesize_policy(policy: Policy) -> None: +def eval_llvm_instcount_policy(policy: Policy) -> None: """Evaluate an LLVM codesize policy and generate results for a leaderboard submission. @@ -185,17 +188,21 @@ def eval_llvm_codesize_policy(policy: Policy) -> None: ... _, _, done, _ = env.step(env.action_space.sample()) ... if done: break - If you would like a stateful policy, you could use a class and override the + If your policy is stateful, you can use a class and override the :code:`__call__()` method: >>> class MyPolicy: - ... def __call__(env: LlvmEnv) -> None: + ... def __init__(self): + ... self.my_stateful_vars = {} # or similar + ... def __call__(self, env: LlvmEnv) -> None: ... pass # ... do fun stuff! >>> my_policy = MyPolicy() - The role of your policy to perform actions on the supplied environment that - it thinks provides the best possible cumulative reward. The policy may set - the observation space as it likes: + The role of your policy is to perform a sequence of actions on the supplied + environment so as to maximize cumulative reward. By default, no observation + space is set on the environment, so :meth:`env.step() + ` will return :code:`None` for the + observation. You may set a new observation space: >>> env.observation_space = "InstCount" # Set a new space for env.step() >>> env.observation["InstCount"] # Calculate a one-off observation. @@ -204,29 +211,30 @@ def eval_llvm_codesize_policy(policy: Policy) -> None: the benchmark. Once you have defined your policy, call the - :func:`eval_llvm_codesize_policy() - ` helper + :func:`eval_llvm_instcount_policy() + ` helper function, passing it your policy as its only argument: - >>> eval_llvm_codesize_policy(my_policy) + >>> eval_llvm_instcount_policy(my_policy) - Put together as a complete example this is what an example leaderboard - submission script looks like: + Put together as a complete example, a leaderboard submission script may look + like: .. code-block:: python # my_policy.py - from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy + from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy from compiler_gym.envs import LlvmEnv def my_policy(env: LlvmEnv) -> None: + env.observation_space = "InstCount" # we're going to use instcount space pass # ... do fun stuff! 
if __name__ == "__main__": - eval_llvm_codesize_policy(my_policy) + eval_llvm_instcount_policy(my_policy) - The :func:`eval_llvm_codesize_policy() - ` helper + The :func:`eval_llvm_instcount_policy() + ` helper defines a number of commandline flags that can be overriden to control the behavior of the evaluation. For example the flag :code:`--n` determines the number of times the policy is run on each benchmark (default is 10), and diff --git a/docs/source/compiler_gym/leaderboard.rst b/docs/source/compiler_gym/leaderboard.rst index 979a7ed20..0e97d5ce2 100644 --- a/docs/source/compiler_gym/leaderboard.rst +++ b/docs/source/compiler_gym/leaderboard.rst @@ -1,16 +1,10 @@ compiler_gym.leaderboard ======================== -We provide `leaderboards -`_ to track the -performance of user-submitted algorithms on CompilerGym tasks. The goal of the -leaderboards is to provide a venue for researchers to promote their work, and to -provide a common benchmark for evaluating approaches. The -:code:`compiler_gym.leaderboard` module contains helper submodules that can be -used for preparing leaderboard submissions. +.. automodule:: compiler_gym.leaderboard -LLVM Codesize -------------- +LLVM Instruction Count +---------------------- -.. automodule:: compiler_gym.leaderboard.llvm_codesize +.. automodule:: compiler_gym.leaderboard.llvm_instcount :members: diff --git a/leaderboard/llvm_codesize/e_greedy/BUILD b/leaderboard/llvm_instcount/e_greedy/BUILD similarity index 83% rename from leaderboard/llvm_codesize/e_greedy/BUILD rename to leaderboard/llvm_instcount/e_greedy/BUILD index 7d5257445..cb84a335f 100644 --- a/leaderboard/llvm_codesize/e_greedy/BUILD +++ b/leaderboard/llvm_instcount/e_greedy/BUILD @@ -8,7 +8,7 @@ py_library( name = "e_greedy", srcs = ["e_greedy.py"], deps = [ - "//compiler_gym/leaderboard:llvm_codesize", + "//compiler_gym/leaderboard:llvm_instcount", ], ) @@ -18,7 +18,7 @@ py_test( srcs = ["e_greedy_test.py"], deps = [ ":e_greedy", - "//compiler_gym/leaderboard:llvm_codesize", + "//compiler_gym/leaderboard:llvm_instcount", "//tests:test_main", "//tests/pytest_plugins:llvm", ], diff --git a/leaderboard/llvm_codesize/e_greedy/README.md b/leaderboard/llvm_instcount/e_greedy/README.md similarity index 100% rename from leaderboard/llvm_codesize/e_greedy/README.md rename to leaderboard/llvm_instcount/e_greedy/README.md diff --git a/leaderboard/llvm_codesize/e_greedy/e_greedy.py b/leaderboard/llvm_instcount/e_greedy/e_greedy.py similarity index 97% rename from leaderboard/llvm_codesize/e_greedy/e_greedy.py rename to leaderboard/llvm_instcount/e_greedy/e_greedy.py index bb7570a59..a61f015e8 100644 --- a/leaderboard/llvm_codesize/e_greedy/e_greedy.py +++ b/leaderboard/llvm_instcount/e_greedy/e_greedy.py @@ -11,7 +11,7 @@ from absl import flags from compiler_gym.envs import CompilerEnv, LlvmEnv -from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy +from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy flags.DEFINE_float( "epsilon", 0, "The ratio of patience to the size of the action space. 
" @@ -108,4 +108,4 @@ def e_greedy_search(env: LlvmEnv) -> None: if __name__ == "__main__": - eval_llvm_codesize_policy(e_greedy_search) + eval_llvm_instcount_policy(e_greedy_search) diff --git a/leaderboard/llvm_codesize/e_greedy/e_greedy_test.py b/leaderboard/llvm_instcount/e_greedy/e_greedy_test.py similarity index 83% rename from leaderboard/llvm_codesize/e_greedy/e_greedy_test.py rename to leaderboard/llvm_instcount/e_greedy/e_greedy_test.py index 3b771baa5..f866072a0 100644 --- a/leaderboard/llvm_codesize/e_greedy/e_greedy_test.py +++ b/leaderboard/llvm_instcount/e_greedy/e_greedy_test.py @@ -2,15 +2,15 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -"""Tests for //leaderboard/llvm_codesize/e_greedy.""" +"""Tests for //leaderboard/llvm_instcount/e_greedy.""" from concurrent.futures import ThreadPoolExecutor import pytest from absl import flags from compiler_gym.envs import LlvmEnv -from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy -from leaderboard.llvm_codesize.e_greedy.e_greedy import ( +from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy +from leaderboard.llvm_instcount.e_greedy.e_greedy import ( e_greedy_search, select_best_action, ) @@ -33,7 +33,7 @@ def test_random_search(): ] ) with pytest.raises(SystemExit): - eval_llvm_codesize_policy(e_greedy_search) + eval_llvm_instcount_policy(e_greedy_search) def test_select_best_action_closed_environment(env: LlvmEnv): diff --git a/leaderboard/llvm_codesize/e_greedy/results_e0.csv b/leaderboard/llvm_instcount/e_greedy/results_e0.csv similarity index 100% rename from leaderboard/llvm_codesize/e_greedy/results_e0.csv rename to leaderboard/llvm_instcount/e_greedy/results_e0.csv diff --git a/leaderboard/llvm_codesize/e_greedy/results_e10.csv b/leaderboard/llvm_instcount/e_greedy/results_e10.csv similarity index 100% rename from leaderboard/llvm_codesize/e_greedy/results_e10.csv rename to leaderboard/llvm_instcount/e_greedy/results_e10.csv diff --git a/tests/leaderboard/BUILD b/tests/leaderboard/BUILD index 9f0fdff0f..2e43ba332 100644 --- a/tests/leaderboard/BUILD +++ b/tests/leaderboard/BUILD @@ -5,10 +5,10 @@ load("@rules_python//python:defs.bzl", "py_test") py_test( - name = "llvm_codesize_test", - srcs = ["llvm_codesize_test.py"], + name = "llvm_instcount_test", + srcs = ["llvm_instcount_test.py"], deps = [ - "//compiler_gym/leaderboard:llvm_codesize", + "//compiler_gym/leaderboard:llvm_instcount", "//tests:test_main", "//tests/pytest_plugins:common", ], diff --git a/tests/leaderboard/llvm_codesize_test.py b/tests/leaderboard/llvm_instcount_test.py similarity index 81% rename from tests/leaderboard/llvm_codesize_test.py rename to tests/leaderboard/llvm_instcount_test.py index 4b3759f35..d1d043204 100644 --- a/tests/leaderboard/llvm_codesize_test.py +++ b/tests/leaderboard/llvm_instcount_test.py @@ -2,13 +2,13 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-"""Tests for //compiler_gym/leaderboard:llvm_codesize.""" +"""Tests for //compiler_gym/leaderboard:llvm_instcount.""" from pathlib import Path import pytest from absl import flags -from compiler_gym.leaderboard.llvm_codesize import eval_llvm_codesize_policy +from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy from tests.test_main import main FLAGS = flags.FLAGS @@ -21,14 +21,14 @@ def null_policy(env) -> None: pass -def test_eval_llvm_codesize_policy(): +def test_eval_llvm_instcount_policy(): FLAGS.unparse_flags() FLAGS(["argv0", "--n=1", "--max_benchmarks=1", "--novalidate"]) with pytest.raises(SystemExit): - eval_llvm_codesize_policy(null_policy) + eval_llvm_instcount_policy(null_policy) -def test_eval_llvm_codesize_policy_resume(tmpwd): +def test_eval_llvm_instcount_policy_resume(tmpwd): FLAGS.unparse_flags() # Run eval on a single benchmark. @@ -43,7 +43,7 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): ] ) with pytest.raises(SystemExit): - eval_llvm_codesize_policy(null_policy) + eval_llvm_instcount_policy(null_policy) # Check that the log has a single entry (and a header row.) assert Path("test.csv").is_file() @@ -64,7 +64,7 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): ] ) with pytest.raises(SystemExit): - eval_llvm_codesize_policy(null_policy) + eval_llvm_instcount_policy(null_policy) # Check that the log extends the original. assert Path("test.csv").is_file() @@ -86,7 +86,7 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): ] ) with pytest.raises(SystemExit): - eval_llvm_codesize_policy(null_policy) + eval_llvm_instcount_policy(null_policy) # Check that the log extends the original. assert Path("test.csv").is_file() @@ -96,11 +96,11 @@ def test_eval_llvm_codesize_policy_resume(tmpwd): assert len(log.rstrip().split("\n")) == 5 -def test_eval_llvm_codesize_policy_invalid_flag(): +def test_eval_llvm_instcount_policy_invalid_flag(): FLAGS.unparse_flags() FLAGS(["argv0", "--n=-1"]) with pytest.raises(AssertionError): - eval_llvm_codesize_policy(null_policy) + eval_llvm_instcount_policy(null_policy) if __name__ == "__main__": From e55f23e79bc23679952f7bc20aafd538029827c1 Mon Sep 17 00:00:00 2001 From: Chris Cummins Date: Wed, 31 Mar 2021 13:12:49 +0100 Subject: [PATCH 6/6] Add leaderboard package as a dependency of //compiler_gym. --- BUILD.bazel | 1 - compiler_gym/BUILD | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD.bazel b/BUILD.bazel index 411b51d32..e71d72af8 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -22,7 +22,6 @@ py_library( "//compiler_gym/bin", "//compiler_gym/datasets", "//compiler_gym/envs", - "//compiler_gym/leaderboard", "//compiler_gym/service", "//compiler_gym/spaces", "//compiler_gym/views", diff --git a/compiler_gym/BUILD b/compiler_gym/BUILD index ddbbea72f..deeaf4ff6 100644 --- a/compiler_gym/BUILD +++ b/compiler_gym/BUILD @@ -15,6 +15,7 @@ py_library( ":random_search", ":validate", "//compiler_gym/envs", + "//compiler_gym/leaderboard", "//compiler_gym/util", ], )