diff --git a/.github/workflows/docker-bases.yml b/.github/workflows/docker-bases.yml index d2bed471803..96846320fc5 100644 --- a/.github/workflows/docker-bases.yml +++ b/.github/workflows/docker-bases.yml @@ -162,47 +162,6 @@ jobs: build-args: 'arch=nvc-host' tags: 'devitocodes/bases:cpu-nvc' -####################################################### -################### Nvidia clang ###################### -####################################################### - deploy-nvidia-clang-base: - name: "nvidia-clang-base" - runs-on: ["self-hosted", "nvidiagpu"] - env: - DOCKER_BUILDKIT: "1" - - steps: - - name: Checkout devito - uses: actions/checkout@v3 - - - name: Check event name - run: echo ${{ github.event_name }} - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Login to DockerHub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: cleanup - run: docker system prune -a -f - - - name: Nvidia clang image - uses: docker/build-push-action@v3 - with: - context: . 
- file: './docker/Dockerfile.nvidia' - push: true - target: 'clang' - build-args: 'arch=clang' - tags: 'devitocodes/bases:nvidia-clang' - ####################################################### ##################### AMD ############################# ####################################################### @@ -252,4 +211,4 @@ jobs: target: 'hip' build-args: | arch=hip - tags: devitocodes/bases:amd-hip \ No newline at end of file + tags: devitocodes/bases:amd-hip diff --git a/.github/workflows/docker-devito.yml b/.github/workflows/docker-devito.yml index 459093ed6ef..08d63fe5973 100644 --- a/.github/workflows/docker-devito.yml +++ b/.github/workflows/docker-devito.yml @@ -24,9 +24,9 @@ jobs: test: 'tests/test_gpu_openacc.py tests/test_gpu_common.py' runner: ["self-hosted", "nvidiagpu"] - - base: 'bases:nvidia-clang' - tag: 'nvidia-clang' - flag: '--gpus all' + - base: 'bases:nvidia-nvc' + tag: 'nvidia-nvc-omp' + flag: '--gpus all --env DEVITO_LANGUAGE=openmp' test: 'tests/test_gpu_openmp.py tests/test_gpu_common.py' runner: ["self-hosted", "nvidiagpu"] diff --git a/.github/workflows/pytest-gpu.yml b/.github/workflows/pytest-gpu.yml index e52134add2f..9efad9cb341 100644 --- a/.github/workflows/pytest-gpu.yml +++ b/.github/workflows/pytest-gpu.yml @@ -53,13 +53,6 @@ jobs: test_examples: ["examples/seismic/tti/tti_example.py examples/seismic/acoustic/acoustic_example.py examples/seismic/viscoacoustic/viscoacoustic_example.py examples/seismic/viscoelastic/viscoelastic_example.py examples/seismic/elastic/elastic_example.py"] include: - - name: pytest-gpu-omp-nvidia - test_files: "tests/test_adjoint.py tests/test_gpu_common.py tests/test_gpu_openmp.py" - base: "devitocodes/bases:nvidia-clang" - tags: ["self-hosted", "nvidiagpu"] - test_drive_cmd: "nvidia-smi" - flags: '--gpus all --rm --name testrun-clang-nvidia' - - name: pytest-gpu-acc-nvidia test_files: "tests/test_adjoint.py tests/test_gpu_common.py tests/test_gpu_openacc.py" base: "devitocodes/bases:nvidia-nvc" @@
-67,6 +60,13 @@ jobs: test_drive_cmd: "nvidia-smi" flags: '--gpus all --rm --name testrun-nvc' + # - name: pytest-gpu-omp-nvidia + # test_files: "tests/test_adjoint.py tests/test_gpu_common.py tests/test_gpu_openmp.py" + # base: "devitocodes/bases:nvidia-nvc" + # tags: ["self-hosted", "nvidiagpu"] + # test_drive_cmd: "nvidia-smi" + # flags: '--gpus all --rm --name testrun-nvc-omp-nvidia --env DEVITO_LANGUAGE=openmp' + - name: pytest-gpu-omp-amd test_files: "tests/test_adjoint.py tests/test_gpu_common.py tests/test_gpu_openmp.py" tags: ["self-hosted", "amdgpu"] diff --git a/docker/Dockerfile.nvidia b/docker/Dockerfile.nvidia index b2bfb62fde0..b82b5fcd1e0 100644 --- a/docker/Dockerfile.nvidia +++ b/docker/Dockerfile.nvidia @@ -8,11 +8,16 @@ ARG arch="nvc" ######################################################################## # Build base image with apt setup and common env ######################################################################## -FROM ${pyversion}-slim-bullseye as sdk-base +FROM ubuntu:22.04 as sdk-base ENV DEBIAN_FRONTEND noninteractive -RUN apt-get update -y && apt-get install -y -q gpg apt-utils curl wget vim libnuma-dev tmux numactl +RUN apt-get update -y && \ + apt-get install -y -q gpg apt-utils curl wget vim libnuma-dev tmux numactl + +#Install python +RUN apt-get update && \ + apt-get install -y dh-autoreconf python3-venv python3-dev python3-pip # nodesource: nvdashboard requires nodejs>=10 RUN curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | gpg --yes --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg @@ -81,7 +86,7 @@ RUN export NVARCH=$(ls -1 /opt/nvidia/hpc_sdk/Linux_x86_64/ | grep '\.' 
| head - ln -sf /opt/nvidia/hpc_sdk/Linux_x86_64/comm_libs/${CUDA_V}/nccl /opt/nvhpc/comm_libs/nccl # Starting nvhpc 23.5 and cuda 12.1, hpcx and openmpi are inside the cuda version folder, only the bin is in the comm_libs path -RUN export CUDA_V=$(ls /opt/nvhpc/${NVARCH}/cuda/ | grep '\.') && \ +RUN export CUDA_V=$(nvcc --version | sed -n 's/^.*release \([0-9]\+\.[0-9]\+\).*$/\1/p') && \ ls /opt/nvhpc/comm_libs/${CUDA_V}/hpcx/ &&\ if [ -d /opt/nvhpc/comm_libs/${CUDA_V}/hpcx ]; then \ rm -rf /opt/nvhpc/comm_libs/hpcx && rm -rf /opt/nvhpc/comm_libs/openmpi4 && \ diff --git a/docker/README.md b/docker/README.md index 10e8996a5db..3df6b0043bc 100644 --- a/docker/README.md +++ b/docker/README.md @@ -10,7 +10,7 @@ Devito provides several images that target different architectures and compilers We provide two CPU images: - `devito:gcc-*` with the standard GNU gcc compiler. -- `devito:icx-*` with the Intel C compiler for Intel architectures. +- `devito:icx-*` with the Intel C compiler for Intel architectures also configured with `intelpython3`. These images provide a working environment for any CPU architecture and come with [Devito], `gcc/icx` and `mpi` preinstalled, and utilities such as `jupyter` for usability and exploration of the package. @@ -40,10 +40,9 @@ In addition, the following legacy tags are available: ### [Devito] on GPU -Second, we provide three images to run [Devito] on GPUs, tagged `devito:nvidia-nvc-*`, `devito:nvidia-clang-*`, and `devito:amd-*`. +Second, we provide three images to run [Devito] on GPUs, tagged `devito:nvidia-nvc-*`, and `devito:amd-*`. - `devito:nvidia-nvc-*` is intended to be used on NVidia GPUs. It comes with the configuration to use the `nvc` compiler for `openacc` offloading. This image also comes with CUDA-aware MPI for multi-GPU deployment. -- `devito:nvidia-clang-*` is intended to be used on NVidia GPUs. It comes with the configuration to use the `clang` compiler for `openmp` offloading. 
This image also comes with CUDA-aware MPI for multi-GPU deployment. - `devito:amd-*` is intended to be used on AMD GPUs. It comes with the configuration to use the `aoompcc` compiler for `openmp` offloading. This image also comes with ROCm-aware MPI for multi-GPU deployment. This image can also be used on AMD CPUs since the ROCm compilers are preinstalled. #### NVidia @@ -93,19 +92,12 @@ To build the GPU image with `openacc` offloading and the `nvc` compiler, run: docker build --build-arg base=devitocodes/bases:nvidia-nvc --network=host --file docker/Dockerfile.devito --tag devito . ``` -or if you wish to use the `clang` compiler with `openmp` offloading: - -```bash -docker build --build-arg base=devitocodes/bases:nvidia-clang --network=host --file docker/Dockerfile --tag devito . -``` - -and finally, for AMD architectures: +or if you wish to use the `amdclang` compiler with `openmp` offloading for AMD architectures: ```bash docker build --build-arg base=devitocodes/bases:amd --network=host --file docker/Dockerfile --tag devito . ``` - ## Debugging a base image To build the base image yourself locally, you need to run the standard build command using the provided Dockerfile. diff --git a/requirements-nvidia.txt b/requirements-nvidia.txt index 5f749fc61c4..2e7534ea43e 100644 --- a/requirements-nvidia.txt +++ b/requirements-nvidia.txt @@ -1,4 +1,4 @@ -cupy-cuda110 +cupy-cuda12x dask-cuda jupyterlab>=3 jupyterlab-nvdashboard