From 93bca14fcffd8f682c9391ee33c29738ab758680 Mon Sep 17 00:00:00 2001 From: Jirka Borovec <6035284+Borda@users.noreply.github.com> Date: Mon, 2 Oct 2023 12:06:27 +0200 Subject: [PATCH] ci: switch to custom docker images (#2123) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> (cherry picked from commit 886c09b01423a5013f289e95672373bfbc33de85) --- .azure/gpu-integrations.yml | 6 +- .azure/gpu-unittests.yml | 56 +++------------ .devcontainer/Dockerfile | 4 +- .devcontainer/devcontainer.json | 2 +- .github/actions/pull-caches/action.yml | 2 +- .github/assistant.py | 4 +- .github/workflows/ci-integrate.yml | 4 +- .github/workflows/ci-tests.yml | 1 - .github/workflows/docker-build.yml | 57 ++++++++++++--- dockers/README.md | 54 ++++++++++++++ dockers/ubuntu-cuda/Dockerfile | 98 ++++++++++++++++++++++++++ requirements.txt | 8 +-- requirements/base.txt | 7 ++ requirements/devel.txt | 2 +- requirements/integrate.txt | 2 +- setup.py | 24 ++++--- 16 files changed, 243 insertions(+), 88 deletions(-) create mode 100644 dockers/README.md create mode 100644 dockers/ubuntu-cuda/Dockerfile create mode 100644 requirements/base.txt diff --git a/.azure/gpu-integrations.yml b/.azure/gpu-integrations.yml index 3c88259cb8f..077b6b236d6 100644 --- a/.azure/gpu-integrations.yml +++ b/.azure/gpu-integrations.yml @@ -65,22 +65,22 @@ jobs: set -e pip install -q packaging fire requests wget python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py - python adjust-torch-versions.py requirements.txt $(torch-ver) + python adjust-torch-versions.py requirements/base.txt $(torch-ver) python adjust-torch-versions.py requirements/integrate.txt $(torch-ver) + # FixMe: this shall not be for all integrations/cases python .github/assistant.py set-oldest-versions --req_files='["requirements/integrate.txt"]' cat requirements/integrate.txt displayName: "Adjust versions" - bash: | - set -ex pip install -q -r requirements/integrate.txt # force reinstall TM as it could be overwritten by integration's dependencies pip install . 
-U -r requirements/test.txt --find-links ${TORCH_URL} - pip list displayName: "Install package & integrations" - bash: | set -e + pip list python -c "from torch import __version__ as ver ; assert str(ver).split('+')[0] == '$(torch-ver)', f'PyTorch: {ver}'" python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu >= 2, f'found GPUs: {mgpu}'" displayName: "Sanity check" diff --git a/.azure/gpu-unittests.yml b/.azure/gpu-unittests.yml index 9ed7be26154..b22d2ae5191 100644 --- a/.azure/gpu-unittests.yml +++ b/.azure/gpu-unittests.yml @@ -19,17 +19,17 @@ jobs: matrix: "PyTorch | old": # Torch does not have build wheels with old Torch versions for newer CUDA - docker-image: "nvidia/cuda:11.1.1-cudnn8-devel-ubuntu20.04" + docker-image: "pytorchlightning/torchmetrics:ubuntu20.04-cuda11.1.1-py3.8-torch1.8.1" agent-pool: "lit-rtx-3090" torch-ver: "1.8.1" "PyTorch | 1.X": - docker-image: "pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime" + docker-image: "pytorchlightning/torchmetrics:ubuntu22.04-cuda11.8.0-py3.9-torch1.13" agent-pool: "lit-rtx-3090" torch-ver: "1.13.1" "PyTorch | 2.X": - docker-image: "pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime" + docker-image: "pytorchlightning/torchmetrics:ubuntu22.04-cuda11.8.0-py3.10-torch2.0" agent-pool: "lit-rtx-3090" - torch-ver: "2.0.0" + torch-ver: "2.0.1" # how long to run the job before automatically cancelling timeoutInMinutes: "120" # how much time to give 'run always even if cancelled tasks' before stopping them @@ -51,37 +51,12 @@ jobs: container: image: "$(docker-image)" - options: "--gpus=all --shm-size=8g -v /usr/bin/docker:/tmp/docker:ro -v /var/tmp:/var/tmp" + options: "--gpus=all --shm-size=8g -v /var/tmp:/var/tmp" workspace: clean: all steps: - - script: | - set -ex - container_id=$(head -1 /proc/self/cgroup|cut -d/ -f3) - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - /tmp/docker exec -t -u 0 $container_id \ - sh -c "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confold" -y install sudo" - echo "##vso[task.setvariable variable=CONTAINER_ID]$container_id" - displayName: "Install Sudo in container (thanks Microsoft!)" - - - script: | - sudo apt-get update -q --fix-missing - sudo apt-get install -q -y --no-install-recommends \ - build-essential \ - wget \ - python${PYTHON_VERSION} \ - python${PYTHON_VERSION}-dev \ - python${PYTHON_VERSION}-distutils - sudo update-alternatives --install /usr/bin/python python /usr/bin/python${PYTHON_VERSION} 1 - wget https://bootstrap.pypa.io/get-pip.py --progress=bar:force:noscroll --no-check-certificate - python get-pip.py - env: - PYTHON_VERSION: "3.8" - condition: startsWith(variables['docker-image'], 'nvidia/cuda:') - displayName: "install python & pip" - - bash: | echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)" CUDA_version=$(nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p') @@ -96,7 +71,6 @@ jobs: whereis nvidia nvidia-smi echo $CUDA_VISIBLE_DEVICES - echo $CONTAINER_ID echo $TORCH_URL python --version pip --version @@ -105,29 +79,22 @@ jobs: displayName: "Image info & NVIDIA" - bash: | - pip install -q packaging wget - python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py + pip install -q packaging + wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py python adjust-torch-versions.py requirements.txt $(torch-ver) for fpath in `ls requirements/*.txt`; do python 
adjust-torch-versions.py $fpath $(torch-ver) done + # FixMe: missing setting minumal configurations for testing displayName: "Adjust versions" - bash: | - set -ex - sudo apt-get update -qq --fix-missing - sudo apt-get install -y --no-install-recommends \ - build-essential gcc g++ cmake ffmpeg git libsndfile1 unzip - # pip install pip -U - pip install -q "numpy<1.24" # trying to resolve pesq installation - pip install . -U -r ./requirements/devel.txt \ - --prefer-binary --find-links=${TORCH_URL} - pip install mkl-service==2.4.0 # needed for the gpu multiprocessing - pip list + pip install . -U -r ./requirements/devel.txt --prefer-binary --find-links=${TORCH_URL} displayName: "Install environment" - bash: | set -e + pip list python -c "from torch import __version__ as ver ; assert str(ver).split('+')[0] == '$(torch-ver)', f'PyTorch: {ver}'" python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu >= 2, f'found GPUs: {mgpu}'" displayName: "Sanity check" @@ -149,8 +116,7 @@ jobs: displayName: "DocTesting" - bash: | - # wget is simpler but does not work on Windows - python -c "from urllib.request import urlretrieve ; urlretrieve('https://pl-public-data.s3.amazonaws.com/metrics/data.zip', 'data.zip')" + wget https://pl-public-data.s3.amazonaws.com/metrics/data.zip unzip -o data.zip ls -l _data/* workingDirectory: tests diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 87e2268e20a..b6675068974 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -11,7 +11,6 @@ RUN if [ "${NODE_VERSION}" != "none" ]; then \ fi COPY requirements/ /tmp/pip-tmp/requirements/ -COPY requirements.txt /tmp/pip-tmp/ RUN \ pip3 install awscli && \ aws s3 sync --no-sign-request s3://sphinx-packages/ dist/ && \ @@ -23,8 +22,7 @@ RUN \ rm -rf /tmp/pip-tmp # [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. -# COPY requirements.txt /tmp/pip-tmp/ -# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ +# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements/base.txt \ # && rm -rf /tmp/pip-tmp # [Optional] Uncomment this section to install additional OS packages. diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 054c24626fc..438c5aa66d4 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,7 +2,7 @@ // https://github.com/microsoft/vscode-dev-containers/tree/v0.194.0/containers/python-3 { "name": "PyTorch Lightning Metrics", - "image": "pytorchlightning/metrics-dev", + "image": "pytorchlightning/torchmetrics:devcontainer-py3.9", // If you want to use a different Python version, uncomment the build object below // "build": { // "dockerfile": "Dockerfile", diff --git a/.github/actions/pull-caches/action.yml b/.github/actions/pull-caches/action.yml index 211de8371cb..91afb6f4be0 100644 --- a/.github/actions/pull-caches/action.yml +++ b/.github/actions/pull-caches/action.yml @@ -26,7 +26,7 @@ runs: if: inputs.pytorch-version != '' run: | curl https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py -o adjust-torch-versions.py - python adjust-torch-versions.py requirements.txt ${{ inputs.pytorch-version }} + python adjust-torch-versions.py requirements/base.txt ${{ inputs.pytorch-version }} shell: bash - name: Set min. 
dependencies diff --git a/.github/assistant.py b/.github/assistant.py index 171e4ff8df1..68f9eeef8f7 100644 --- a/.github/assistant.py +++ b/.github/assistant.py @@ -68,10 +68,10 @@ def prune_packages(req_file: str, *pkgs: str) -> None: fp.writelines(lines) @staticmethod - def set_min_torch_by_python(fpath: str = "requirements.txt") -> None: + def set_min_torch_by_python(fpath: str = "requirements/base.txt") -> None: """Set minimal torch version according to Python actual version. - >>> AssistantCLI.set_min_torch_by_python("../requirements.txt") + >>> AssistantCLI.set_min_torch_by_python("../requirements/base.txt") """ py_ver = f"{sys.version_info.major}.{sys.version_info.minor}" diff --git a/.github/workflows/ci-integrate.yml b/.github/workflows/ci-integrate.yml index c251127d674..629e28e10fc 100644 --- a/.github/workflows/ci-integrate.yml +++ b/.github/workflows/ci-integrate.yml @@ -66,9 +66,9 @@ jobs: curl https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py -o adjust-torch-versions.py pip install -r requirements/test.txt -r requirements/integrate.txt \ --find-links $PYTORCH_URL -f $PYPI_CACHE --upgrade-strategy eager - python adjust-torch-versions.py requirements.txt + python adjust-torch-versions.py requirements/base.txt python adjust-torch-versions.py requirements/image.txt - cat requirements.txt + cat requirements/base.txt pip install -e . --find-links $PYTORCH_URL -f $PYPI_CACHE pip list diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml index 77c5f8ded7b..b99f8f32a39 100644 --- a/.github/workflows/ci-tests.yml +++ b/.github/workflows/ci-tests.yml @@ -116,7 +116,6 @@ jobs: run: | curl https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py -o adjust-torch-versions.py pip install -q cython # needed for installing `pycocotools` in latest config - python adjust-torch-versions.py requirements.txt for fpath in `ls requirements/*.txt`; do python adjust-torch-versions.py $fpath done diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 7538970d3c6..6716d422f47 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -1,4 +1,4 @@ -name: "Build & Push Docker" +name: "Build (& Push) Dockers" on: # Trigger the workflow on push or pull request, but only for the master branch push: @@ -7,10 +7,10 @@ on: # Trigger the workflow on push or pull request, but only for the master bran branches: [master] paths: - "requirements/*" - - ".devcontainer/*" - - "environment.yml" - "requirements.txt" - - ".github/workflows/*docker*.yml" + - ".devcontainer/*" + - "dockers/**" + - ".github/workflows/docker-build.yml" - "setup.py" workflow_dispatch: {} @@ -19,7 +19,7 @@ concurrency: cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} env: - PUSH_RELEASE: ${{ github.ref == 'refs/heads/master' || github.event_name == 'workflow_dispatch' }} + PUSH_DOCKERHUB: ${{ github.ref == 'refs/heads/master' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }} jobs: build-Devcontainer: @@ -27,13 +27,13 @@ jobs: strategy: fail-fast: false matrix: - python_version: ["3.9"] + python: ["3.9", "3.10"] steps: - name: Checkout uses: actions/checkout@v4 - name: Login to DockerHub - if: env.PUSH_RELEASE == 'true' && github.repository_owner == 'Lightning-AI' + if: env.PUSH_DOCKERHUB == 'true' && github.repository_owner == 'Lightning-AI' uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USERNAME }} @@ -44,8 
+44,45 @@ jobs: uses: docker/build-push-action@v5 with: build-args: | - VARIANT=${{ matrix.python_version }} + VARIANT=${{ matrix.python }} file: .devcontainer/Dockerfile - push: ${{ env.PUSH_RELEASE }} - tags: pytorchlightning/metrics-dev + push: ${{ env.PUSH_DOCKERHUB }} + tags: "pytorchlightning/torchmetrics:devcontainer-py${{ matrix.python }}" timeout-minutes: 50 + + build-cuda: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + # These are the base images for PL release docker images, + # so include at least all of the combinations in release-dockers.yml. + - { python: "3.8", pytorch: "1.8.1", cuda: "11.1.1", ubuntu: "20.04" } + - { python: "3.9", pytorch: "1.10", cuda: "11.8.0", ubuntu: "22.04" } + - { python: "3.9", pytorch: "1.11", cuda: "11.8.0", ubuntu: "22.04" } + - { python: "3.9", pytorch: "1.13", cuda: "11.8.0", ubuntu: "22.04" } + - { python: "3.10", pytorch: "2.0", cuda: "11.8.0", ubuntu: "22.04" } + steps: + - uses: actions/checkout@v4 + + - name: Login to DockerHub + uses: docker/login-action@v3 + if: env.PUSH_DOCKERHUB == 'true' && github.repository_owner == 'Lightning-AI' + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build (and Push) Devcontainer + uses: docker/build-push-action@v5 + with: + build-args: | + UBUNTU_VERSION=${{ matrix.ubuntu }} + PYTHON_VERSION=${{ matrix.python }} + PYTORCH_VERSION=${{ matrix.pytorch }} + CUDA_VERSION=${{ matrix.cuda }} + file: dockers/ubuntu-cuda/Dockerfile + push: ${{ env.PUSH_DOCKERHUB }} + tags: "pytorchlightning/torchmetrics:ubuntu${{ matrix.ubuntu }}-cuda${{ matrix.cuda }}-py${{ matrix.python }}-torch${{ matrix.pytorch }}" + timeout-minutes: 55 diff --git a/dockers/README.md b/dockers/README.md new file mode 100644 index 00000000000..29409c89f89 --- /dev/null +++ b/dockers/README.md @@ -0,0 +1,54 @@ +# Docker images + +## Build images from Dockerfiles + +You can build it on your own, note it takes lots of time, be prepared. + +```bash +git clone https://github.com/Lightning-AI/torchmetrics.git + +# build with the default arguments +docker image build -t torchmetrics:latest -f dockers/ubuntu-cuda/Dockerfile . + +# build with specific arguments +docker image build -t torchmetrics:ubuntu-cuda11.7.1-py3.9-torch1.13 \ + -f dockers/base-cuda/Dockerfile \ + --build-arg PYTHON_VERSION=3.9 \ + --build-arg PYTORCH_VERSION=1.13 \ + --build-arg CUDA_VERSION=11.7.1 \ + . +``` + +To run your docker use + +```bash +docker image list +docker run --rm -it torchmetrics:latest bash +``` + +and if you do not need it anymore, just clean it: + +```bash +docker image list +docker image rm torchmetrics:latest +``` + +## Run docker image with GPUs + +To run docker image with access to your GPUs, you need to install + +```bash +# Add the package repositories +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) +curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - +curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list + +sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit +sudo systemctl restart docker +``` + +and later run the docker image with `--gpus all`. 
For example, + +```bash +docker run --rm -it --gpus all torchmetrics:ubuntu-cuda11.7.1-py3.9-torch1.12 +``` diff --git a/dockers/ubuntu-cuda/Dockerfile b/dockers/ubuntu-cuda/Dockerfile new file mode 100644 index 00000000000..b382279a869 --- /dev/null +++ b/dockers/ubuntu-cuda/Dockerfile @@ -0,0 +1,98 @@ +# Copyright The Lightning AI team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ARG UBUNTU_VERSION=22.04 +ARG CUDA_VERSION=11.7.1 + + +FROM nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} + +ARG PYTHON_VERSION=3.10 +ARG PYTORCH_VERSION=2.0 + +SHELL ["/bin/bash", "-c"] +# https://techoverflow.net/2019/05/18/how-to-fix-configuring-tzdata-interactive-input-when-building-docker-images/ +ENV \ + DEBIAN_FRONTEND="noninteractive" \ + TZ="Etc/UTC" \ + PATH="$PATH:/root/.local/bin" \ + CUDA_TOOLKIT_ROOT_DIR="/usr/local/cuda" \ + MKL_THREADING_LAYER="GNU" \ + # MAKEFLAGS="-j$(nproc)" + MAKEFLAGS="-j2" + +RUN \ + apt-get -y update --fix-missing && \ + apt-get install -y --no-install-recommends --allow-downgrades --allow-change-held-packages \ + build-essential \ + pkg-config \ + cmake \ + git \ + wget \ + curl \ + unzip \ + g++ \ + cmake \ + ffmpeg \ + git \ + libsndfile1 \ + ca-certificates \ + software-properties-common \ + libopenmpi-dev \ + openmpi-bin \ + ssh \ + && \ + # Install python + add-apt-repository ppa:deadsnakes/ppa && \ + apt-get install -y \ + python${PYTHON_VERSION} \ + python${PYTHON_VERSION}-distutils \ + python${PYTHON_VERSION}-dev \ + && \ + update-alternatives --install /usr/bin/python${PYTHON_VERSION%%.*} python${PYTHON_VERSION%%.*} /usr/bin/python${PYTHON_VERSION} 1 && \ + update-alternatives --install /usr/bin/python python /usr/bin/python${PYTHON_VERSION} 1 && \ + curl https://bootstrap.pypa.io/get-pip.py | python && \ + # Cleaning + apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /root/.cache && \ + rm -rf /var/lib/apt/lists/* + +ENV PYTHONPATH="/usr/lib/python${PYTHON_VERSION}/site-packages" + +COPY requirements/ requirements/ + +RUN \ + # set particular PyTorch version + pip install -q wget packaging && \ + python -m wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/adjust-torch-versions.py && \ + for fpath in `ls requirements/*.txt`; do \ + python ./adjust-torch-versions.py $fpath ${PYTORCH_VERSION}; \ + done && \ + # trying to resolve pesq installation issue + pip install -q "numpy<1.24" && \ + # needed for the gpu multiprocessing + pip install "mkl-service==2.4.0" && \ + CUDA_VERSION_MM=${CUDA_VERSION%.*} && \ + CU_VERSION_MM=${CUDA_VERSION_MM//'.'/''} && \ + pip install --no-cache-dir -r requirements/devel.txt \ + --find-links "https://download.pytorch.org/whl/cu${CU_VERSION_MM}/torch_stable.html" && \ + rm -rf requirements/ + +RUN \ + # Show what we have + pip --version && \ + pip list && \ + python -c "import sys; ver = sys.version_info ; assert f'{ver.major}.{ver.minor}' == '$PYTHON_VERSION', ver" && \ + python -c "import torch; assert torch.__version__.startswith('$PYTORCH_VERSION'), torch.__version__" diff --git 
a/requirements.txt b/requirements.txt index 536c920e6f5..5603c3777cb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1 @@ -# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package -# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment - -numpy >1.20.0 -torch >=1.8.1, <=2.0.1 -typing-extensions; python_version < '3.9' -lightning-utilities >=0.8.0, <0.10.0 +-r requirements/base.txt diff --git a/requirements/base.txt b/requirements/base.txt new file mode 100644 index 00000000000..536c920e6f5 --- /dev/null +++ b/requirements/base.txt @@ -0,0 +1,7 @@ +# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package +# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment + +numpy >1.20.0 +torch >=1.8.1, <=2.0.1 +typing-extensions; python_version < '3.9' +lightning-utilities >=0.8.0, <0.10.0 diff --git a/requirements/devel.txt b/requirements/devel.txt index a9e82526941..a6cb48591ca 100644 --- a/requirements/devel.txt +++ b/requirements/devel.txt @@ -1,5 +1,5 @@ # use mandatory dependencies --r ../requirements.txt +-r base.txt # add the testing dependencies -r test.txt diff --git a/requirements/integrate.txt b/requirements/integrate.txt index 5f20e780ecd..3e21166acef 100644 --- a/requirements/integrate.txt +++ b/requirements/integrate.txt @@ -1 +1 @@ -pytorch-lightning >=1.6.0, <3.0.0 +pytorch-lightning >=1.6.0, <2.1.0 diff --git a/setup.py b/setup.py index b8c27ec4b5e..65f457da2d4 100755 --- a/setup.py +++ b/setup.py @@ -96,11 +96,11 @@ def _parse_requirements(strs: Union[str, Iterable[str]]) -> Iterator[_Requiremen def _load_requirements( - path_dir: str, file_name: str = "requirements.txt", unfreeze: bool = not _FREEZE_REQUIREMENTS + path_dir: str, file_name: str = "base.txt", unfreeze: bool = not _FREEZE_REQUIREMENTS ) -> List[str]: """Load requirements from a file. 
- >>> _load_requirements(_PATH_ROOT) + >>> _load_requirements(_PATH_REQUIRE) ['numpy...', 'torch..."] """ @@ -158,10 +158,12 @@ def _load_py_module(fname: str, pkg: str = "torchmetrics"): homepage=ABOUT.__homepage__, version=f"v{ABOUT.__version__}", ) -BASE_REQUIREMENTS = _load_requirements(path_dir=_PATH_ROOT, file_name="requirements.txt") +BASE_REQUIREMENTS = _load_requirements(path_dir=_PATH_REQUIRE, file_name="base.txt") -def _prepare_extras(skip_files: Tuple[str] = ("devel.txt", "doctest.txt", "integrate.txt", "docs.txt")) -> dict: +def _prepare_extras( + skip_files: Tuple[str] = ("base.txt", "devel.txt", "doctest.txt", "integrate.txt", "docs.txt") +) -> dict: # find all extra requirements _load_req = partial(_load_requirements, path_dir=_PATH_REQUIRE) found_req_files = sorted(os.path.basename(p) for p in glob.glob(os.path.join(_PATH_REQUIRE, "*.txt"))) @@ -169,17 +171,17 @@ def _prepare_extras(skip_files: Tuple[str] = ("devel.txt", "doctest.txt", "integ found_req_files = [n for n in found_req_files if n not in skip_files] found_req_names = [os.path.splitext(req)[0] for req in found_req_files] # define basic and extra extras - extras_req = { - name: _load_req(file_name=fname) for name, fname in zip(found_req_names, found_req_files) if "_test" not in name - } + extras_req = {"_tests": []} for name, fname in zip(found_req_names, found_req_files): - if "_test" in name: - extras_req["test"] += _load_req(file_name=fname) + if name.endswith("_test"): + extras_req["_tests"] += _load_req(file_name=fname) + else: + extras_req[name] = _load_req(file_name=fname) # filter the uniques extras_req = {n: list(set(req)) for n, req in extras_req.items()} # create an 'all' keyword that install all possible dependencies - extras_req["all"] = list(chain([pkgs for k, pkgs in extras_req.items() if k not in ("test", "docs")])) - extras_req["dev"] = extras_req["all"] + extras_req["test"] + extras_req["all"] = list(chain([pkgs for k, pkgs in extras_req.items() if k not in ("_test", "_tests")])) + extras_req["dev"] = extras_req["all"] + extras_req["_tests"] return extras_req
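
For reference, the extras grouping introduced in `_prepare_extras` above can be summarized by the following minimal, standalone sketch. This is illustrative only, not the patched setup.py: the `group_extras` helper and the demo requirement mapping are made up for this example. The idea is that each `requirements/<name>.txt` becomes an extra named `<name>`, every `<name>_test.txt` file is folded into a private `_tests` bucket, and `all`/`dev` are derived from those.

```python
from itertools import chain
from typing import Dict, List


def group_extras(req_files: Dict[str, List[str]]) -> Dict[str, List[str]]:
    """Map requirement-file stems (e.g. 'audio', 'audio_test') to pip extras."""
    extras: Dict[str, List[str]] = {"_tests": []}
    for name, reqs in req_files.items():
        if name.endswith("_test"):
            extras["_tests"] += reqs  # fold all per-domain test deps into one bucket
        else:
            extras[name] = list(reqs)  # regular extra, e.g. "audio", "image"
    # de-duplicate each requirement list
    extras = {name: sorted(set(reqs)) for name, reqs in extras.items()}
    # 'all' bundles every non-test extra; 'dev' adds the collected test deps on top
    extras["all"] = sorted(
        set(chain.from_iterable(reqs for name, reqs in extras.items() if name != "_tests"))
    )
    extras["dev"] = extras["all"] + extras["_tests"]
    return extras


if __name__ == "__main__":
    # hypothetical requirement files, already parsed into package specifiers
    demo = {"audio": ["torchaudio >=0.10"], "audio_test": ["pesq"], "image": ["scipy"]}
    print(group_extras(demo))
```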