From 11df96923c55739218f2d620273fdcbf51596b75 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 10:20:43 -0700 Subject: [PATCH 1/8] add docker for torch nightly Signed-off-by: Yang Wang --- docker/Dockerfile.nightly_torch | 335 ++++++++++++++++++++++++++++ requirements/nightly_torch_test.txt | 39 ++++ 2 files changed, 374 insertions(+) create mode 100644 docker/Dockerfile.nightly_torch create mode 100644 requirements/nightly_torch_test.txt diff --git a/docker/Dockerfile.nightly_torch b/docker/Dockerfile.nightly_torch new file mode 100644 index 000000000000..6a01e251d156 --- /dev/null +++ b/docker/Dockerfile.nightly_torch @@ -0,0 +1,335 @@ +# The vLLM Dockerfile is used to construct vLLM image that can be directly used for testing + +# for torch nightly, cuda >=12.6 is required, +# use 12.8 due to FlashAttention issue with cuda 12.6 (https://github.com/vllm-project/vllm/issues/15435#issuecomment-2775924628) +ARG CUDA_VERSION=12.8.0 +# +#################### BASE BUILD IMAGE #################### +# prepare basic build environment +FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 AS base +ARG CUDA_VERSION=12.8.0 +ARG PYTHON_VERSION=3.12 +ARG TARGETPLATFORM +ENV DEBIAN_FRONTEND=noninteractive +# Install Python and other dependencies +RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ + && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \ + && apt-get update -y \ + && apt-get install -y ccache software-properties-common git curl sudo \ + && add-apt-repository ppa:deadsnakes/ppa \ + && apt-get update -y \ + && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ + && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ + && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ + && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ + && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} \ + && python3 --version \ + && python3 -m pip --version +# Install uv for faster pip installs +RUN --mount=type=cache,target=/root/.cache/uv \ + python3 -m pip install uv + +# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out +# Reference: https://github.com/astral-sh/uv/pull/1694 +ENV UV_HTTP_TIMEOUT=500 + +# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519 +# as it was causing spam when compiling the CUTLASS kernels +RUN apt-get install -y gcc-10 g++-10 +RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10 +RUN < torch_build_versions.txt +RUN cat torch_build_versions.txt + +# cuda arch list used by torch +# can be useful for `test` +# explicitly set the list to avoid issues with torch 2.2 +# see https://github.com/pytorch/pytorch/pull/123243 + +# Override the arch list for flash-attn to reduce the binary size +ARG vllm_fa_cmake_gpu_arches='80-real;90-real' +ENV VLLM_FA_CMAKE_GPU_ARCHES=${vllm_fa_cmake_gpu_arches} +#################### BASE BUILD IMAGE #################### + +#################### WHEEL BUILD IMAGE #################### +FROM base AS build +ARG TARGETPLATFORM + +# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out +# Reference: https://github.com/astral-sh/uv/pull/1694 +ENV UV_HTTP_TIMEOUT=500 + +COPY . . 
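# Hypothetical sketch (not a build step in this Dockerfile): once the nightly torch stack is
# installed, a pin file like the torch_build_versions.txt produced in the base stage could be
# regenerated by hand with something along the lines of
#   python3 -m pip freeze | grep -E '^(torch|torchaudio|torchvision)==' > torch_build_versions.txt
# The vllm-base stage below consumes that file so the runtime image installs the same nightly
# builds the wheel was compiled against.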
+ +RUN python3 use_existing_torch.py + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -r requirements/build.txt + +ARG GIT_REPO_CHECK=0 +RUN --mount=type=bind,source=.git,target=.git \ + if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi + +# Max jobs used by Ninja to build extensions +ARG max_jobs=64 +ENV MAX_JOBS=${max_jobs} +ARG nvcc_threads=2 +ENV NVCC_THREADS=$nvcc_threads + +ARG USE_SCCACHE +ARG SCCACHE_BUCKET_NAME=vllm-build-sccache +ARG SCCACHE_REGION_NAME=us-west-2 +ARG SCCACHE_S3_NO_CREDENTIALS=0 + +# if USE_SCCACHE is set, use sccache to speed up compilation +RUN --mount=type=cache,target=/root/.cache/uv \ + --mount=type=bind,source=.git,target=.git \ + if [ "$USE_SCCACHE" = "1" ]; then \ + echo "Installing sccache..." \ + && curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \ + && tar -xzf sccache.tar.gz \ + && sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \ + && rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \ + && export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \ + && export SCCACHE_REGION=${SCCACHE_REGION_NAME} \ + && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \ + && export SCCACHE_IDLE_TIMEOUT=0 \ + && export CMAKE_BUILD_TYPE=Release \ + && sccache --show-stats \ + && python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38 \ + && sccache --show-stats; \ + fi + +ENV CCACHE_DIR=/root/.cache/ccache +RUN --mount=type=cache,target=/root/.cache/ccache \ + --mount=type=cache,target=/root/.cache/uv \ + --mount=type=bind,source=.git,target=.git \ + if [ "$USE_SCCACHE" != "1" ]; then \ + # Clean any existing CMake artifacts + rm -rf .deps && \ + mkdir -p .deps && \ + python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38; \ + fi + +#################### WHEEL BUILD IMAGE #################### + +################### VLLM INSTALLED IMAGE #################### +# Setup clean environment for vLLM and its dependencies for test and api server using ubuntu22.04 with AOT flashinfer +FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS vllm-base +# prepare for environment starts +ARG CUDA_VERSION=12.4.1 +ARG PYTHON_VERSION=3.12 +WORKDIR /vllm-workspace +ENV DEBIAN_FRONTEND=noninteractive +ARG TARGETPLATFORM + +RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \ + echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment + +# Install Python and other dependencies +RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ + && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \ + && apt-get update -y \ + && apt-get install -y ccache software-properties-common git curl wget sudo vim python3-pip \ + && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \ + && add-apt-repository ppa:deadsnakes/ppa \ + && apt-get update -y \ + && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \ + && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ + && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ + && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ + && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} \ + && python3 --version && python3 -m pip --version + +RUN --mount=type=cache,target=/root/.cache/uv \ + python3 -m pip install uv + +# This timeout (in 
seconds) is necessary when installing some dependencies via uv since it's likely to time out +# Reference: https://github.com/astral-sh/uv/pull/1694 +ENV UV_HTTP_TIMEOUT=500 + +# Workaround for https://github.com/openai/triton/issues/2507 and +# https://github.com/pytorch/pytorch/issues/107960 -- hopefully +# this won't be needed for future versions of this docker image +# or future versions of triton. +RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/ + +# get the nightly torch version used in the build to make sure the version is the same +COPY --from=base /workspace/torch_build_versions.txt ./torch_build_versions.txt + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system $(cat torch_build_versions.txt | xargs) --index-url https://download.pytorch.org/whl/nightly/cu128 + +# install the vllm wheel +RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/vllm-dist \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system vllm-dist/*.whl --verbose + +# install xformers again for the new environment +RUN --mount=type=bind,from=base,src=/workspace/xformers-dist,target=/vllm-workspace/xformers-dist \ + --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system /vllm-workspace/xformers-dist/*.whl --verbose + +ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0' + +# install package for build flashinfer +# see issue: https://github.com/flashinfer-ai/flashinfer/issues/738 +RUN pip install setuptools==75.6.0 packaging==23.2 ninja==1.11.1.3 build==1.2.2.post1 + +# build flashinfer for torch nightly from source around 10 mins +# release version: v0.2.2.post1 +RUN --mount=type=cache,target=/root/.cache/ccache \ + --mount=type=cache,target=/root/.cache/uv \ + echo "git clone flashinfer..." \ + && git clone --recursive https://github.com/flashinfer-ai/flashinfer.git \ + && cd flashinfer \ + && git checkout v0.2.2.post1 \ + && git submodule update --init --recursive \ + && echo "finish git clone flashinfer..." \ + && rm -rf build \ + && export TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} \ + && FLASHINFER_ENABLE_AOT=1 python3 setup.py bdist_wheel --dist-dir=../flashinfer-dist --verbose \ + && cd .. \ + && rm -rf flashinfer + +# install flashinfer +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system flashinfer-dist/*.whl --verbose + +# install common packages +COPY requirements/common.txt requirements/common.txt +COPY use_existing_torch.py use_existing_torch.py +COPY pyproject.toml pyproject.toml + +COPY examples examples +COPY benchmarks benchmarks +COPY ./vllm/collect_env.py . 
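# Hypothetical sanity check (illustrative only, not part of the image build): at this point the
# runtime torch should match the pin captured in the base stage, which can be verified with e.g.
#   python3 -c "import torch, flashinfer, vllm; print(torch.__version__)"
#   grep '^torch==' torch_build_versions.txt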
+ +RUN python3 use_existing_torch.py +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -r requirements/common.txt + +################### VLLM INSTALLED IMAGE #################### + + +#################### UNITTEST IMAGE ############################# +FROM vllm-base as test +COPY tests/ tests/ + +# install build and runtime dependencies without stable torch version +COPY requirements/nightly_torch_test.txt requirements/nightly_torch_test.txt + +# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out +# Reference: https://github.com/astral-sh/uv/pull/1694 +ENV UV_HTTP_TIMEOUT=500 + +# install development dependencies (for testing) +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -e tests/vllm_test_utils + +# enable fast downloads from hf (for testing) +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system hf_transfer +ENV HF_HUB_ENABLE_HF_TRANSFER 1 + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -r requirements/nightly_torch_test.txt + +#################### UNITTEST IMAGE ############################# + +#################### UNITTEST IMAGE ############################# +FROM vllm-base as test-hard + +ADD . /vllm-workspace/ + +# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out +# Reference: https://github.com/astral-sh/uv/pull/1694 +ENV UV_HTTP_TIMEOUT=500 + +# install development dependencies (for testing) +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -r requirements/dev.txt + +# install development dependencies (for testing) +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system -e tests/vllm_test_utils + +# enable fast downloads from hf (for testing) +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --system hf_transfer +ENV HF_HUB_ENABLE_HF_TRANSFER 1 + +# Copy in the v1 package for testing (it isn't distributed yet) +COPY vllm/v1 /usr/local/lib/python3.12/dist-packages/vllm/v1 + +# doc requires source code +# we hide them inside `test_docs/` , so that this source code +# will not be imported by other tests +RUN mkdir test_docs +RUN mv docs test_docs/ +RUN mv vllm test_docs/ + +#################### UNITTEST IMAGE ############################# diff --git a/requirements/nightly_torch_test.txt b/requirements/nightly_torch_test.txt new file mode 100644 index 000000000000..2e21a5bac5e0 --- /dev/null +++ b/requirements/nightly_torch_test.txt @@ -0,0 +1,39 @@ +# Dependency that able to run entrypoints test +# pytest and its extensions +pytest +pytest-asyncio +pytest-forked +pytest-mock +pytest-rerunfailures +pytest-shard +pytest-timeout + +# librosa==0.10.2.post1 # required by audio tests in entrypoints/openai +librosa==0.10.2.post1 +audioread==3.0.1 +cffi==1.17.1 +decorator==5.2.1 +lazy-loader==0.4 +platformdirs==4.3.6 +pooch==1.8.2 +soundfile==0.13.1 +soxr==0.5.0.post1 +#vllm[video] # required by entrypoints/openai/test_video.py +decord==0.6.0 +#sentence-transformers # required by entrypoints/openai/test_score.py +sentence-transformers==3.4.1 +numba == 0.61.2; python_version > '3.9' +# testing utils +awscli +boto3 +botocore +datasets +ray >= 2.10.0 +peft +runai-model-streamer==0.11.0 +runai-model-streamer-s3==0.11.0 +tensorizer>=2.9.0 +lm-eval==0.4.8 +buildkite-test-collector==0.1.9 + +lm-eval[api]==0.4.8 # required for model evaluation test From 8d21643153d395cb1e45f69e8e5bba826c6ddbd7 Mon Sep 17 00:00:00 2001 
From: Yang Wang Date: Mon, 21 Apr 2025 10:22:44 -0700 Subject: [PATCH 2/8] add comment Signed-off-by: Yang Wang --- docker/Dockerfile.nightly_torch | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile.nightly_torch b/docker/Dockerfile.nightly_torch index 6a01e251d156..87142eb0e092 100644 --- a/docker/Dockerfile.nightly_torch +++ b/docker/Dockerfile.nightly_torch @@ -1,4 +1,4 @@ -# The vLLM Dockerfile is used to construct vLLM image that can be directly used for testing +# The vLLM Dockerfile is used to construct vLLM image against torch nightly that can be directly used for testing # for torch nightly, cuda >=12.6 is required, # use 12.8 due to FlashAttention issue with cuda 12.6 (https://github.com/vllm-project/vllm/issues/15435#issuecomment-2775924628) @@ -78,7 +78,8 @@ ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0' ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} # Build xformers with cuda and torch nightly -ARG max_jobs=64 +# following official xformers guidance: https://github.com/facebookresearch/xformers#build +ARG max_jobs=16 ENV MAX_JOBS=${max_jobs} ARG XFORMERS_COMMIT=f2de641ef670510cadab099ce6954031f52f191c @@ -134,7 +135,7 @@ RUN --mount=type=bind,source=.git,target=.git \ if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi # Max jobs used by Ninja to build extensions -ARG max_jobs=64 +ARG max_jobs=16 ENV MAX_JOBS=${max_jobs} ARG nvcc_threads=2 ENV NVCC_THREADS=$nvcc_threads From 814835f9671e2c3c500cf054b90e51db2f9ea5ca Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 10:26:31 -0700 Subject: [PATCH 3/8] add comment Signed-off-by: Yang Wang --- docker/Dockerfile.nightly_torch | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/Dockerfile.nightly_torch b/docker/Dockerfile.nightly_torch index 87142eb0e092..f22d20cb08fa 100644 --- a/docker/Dockerfile.nightly_torch +++ b/docker/Dockerfile.nightly_torch @@ -79,6 +79,7 @@ ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} # Build xformers with cuda and torch nightly # following official xformers guidance: https://github.com/facebookresearch/xformers#build +# todo(elainewy): cache xformers build result for faster build ARG max_jobs=16 ENV MAX_JOBS=${max_jobs} ARG XFORMERS_COMMIT=f2de641ef670510cadab099ce6954031f52f191c @@ -240,8 +241,11 @@ ARG torch_cuda_arch_list='8.0;8.6;8.9;9.0' # see issue: https://github.com/flashinfer-ai/flashinfer/issues/738 RUN pip install setuptools==75.6.0 packaging==23.2 ninja==1.11.1.3 build==1.2.2.post1 + # build flashinfer for torch nightly from source around 10 mins # release version: v0.2.2.post1 +# todo(elainewy): cache flashinfer build result for faster build +ENV CCACHE_DIR=/root/.cache/ccache RUN --mount=type=cache,target=/root/.cache/ccache \ --mount=type=cache,target=/root/.cache/uv \ echo "git clone flashinfer..." 
\ From 978c19974f975c403fc9c8289d97fa72387e4ad0 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 14:06:08 -0700 Subject: [PATCH 4/8] fix size Signed-off-by: Yang Wang --- requirements/nightly_torch_test.txt | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/requirements/nightly_torch_test.txt b/requirements/nightly_torch_test.txt index 2e21a5bac5e0..8ac0fbc151e4 100644 --- a/requirements/nightly_torch_test.txt +++ b/requirements/nightly_torch_test.txt @@ -9,19 +9,8 @@ pytest-shard pytest-timeout # librosa==0.10.2.post1 # required by audio tests in entrypoints/openai -librosa==0.10.2.post1 -audioread==3.0.1 -cffi==1.17.1 -decorator==5.2.1 -lazy-loader==0.4 -platformdirs==4.3.6 -pooch==1.8.2 -soundfile==0.13.1 -soxr==0.5.0.post1 -#vllm[video] # required by entrypoints/openai/test_video.py -decord==0.6.0 -#sentence-transformers # required by entrypoints/openai/test_score.py -sentence-transformers==3.4.1 +librosa +sentence-transformers numba == 0.61.2; python_version > '3.9' # testing utils awscli From 4d6508900b15d371e6905a687bb75edb191550e9 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 14:07:08 -0700 Subject: [PATCH 5/8] fix size Signed-off-by: Yang Wang --- docker/Dockerfile.nightly_torch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.nightly_torch b/docker/Dockerfile.nightly_torch index f22d20cb08fa..3e72447cadc4 100644 --- a/docker/Dockerfile.nightly_torch +++ b/docker/Dockerfile.nightly_torch @@ -182,7 +182,7 @@ RUN --mount=type=cache,target=/root/.cache/ccache \ # Setup clean environment for vLLM and its dependencies for test and api server using ubuntu22.04 with AOT flashinfer FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS vllm-base # prepare for environment starts -ARG CUDA_VERSION=12.4.1 +ARG CUDA_VERSION=12.8.0 ARG PYTHON_VERSION=3.12 WORKDIR /vllm-workspace ENV DEBIAN_FRONTEND=noninteractive From e2d321bb406dbd0852914d2ec7530268bc35adff Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 14:08:16 -0700 Subject: [PATCH 6/8] fix size Signed-off-by: Yang Wang --- requirements/nightly_torch_test.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/nightly_torch_test.txt b/requirements/nightly_torch_test.txt index 8ac0fbc151e4..20372a9b2ef1 100644 --- a/requirements/nightly_torch_test.txt +++ b/requirements/nightly_torch_test.txt @@ -8,8 +8,8 @@ pytest-rerunfailures pytest-shard pytest-timeout -# librosa==0.10.2.post1 # required by audio tests in entrypoints/openai -librosa + +librosa # required by audio tests in entrypoints/openai sentence-transformers numba == 0.61.2; python_version > '3.9' # testing utils From 88d11a706a84d32e99675aeab80532d8a43e6e2c Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 14:12:21 -0700 Subject: [PATCH 7/8] fix size Signed-off-by: Yang Wang --- docker/Dockerfile.nightly_torch | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/docker/Dockerfile.nightly_torch b/docker/Dockerfile.nightly_torch index 3e72447cadc4..0063712e4781 100644 --- a/docker/Dockerfile.nightly_torch +++ b/docker/Dockerfile.nightly_torch @@ -305,36 +305,3 @@ RUN --mount=type=cache,target=/root/.cache/uv \ #################### UNITTEST IMAGE ############################# -#################### UNITTEST IMAGE ############################# -FROM vllm-base as test-hard - -ADD . 
/vllm-workspace/ - -# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out -# Reference: https://github.com/astral-sh/uv/pull/1694 -ENV UV_HTTP_TIMEOUT=500 - -# install development dependencies (for testing) -RUN --mount=type=cache,target=/root/.cache/uv \ - uv pip install --system -r requirements/dev.txt - -# install development dependencies (for testing) -RUN --mount=type=cache,target=/root/.cache/uv \ - uv pip install --system -e tests/vllm_test_utils - -# enable fast downloads from hf (for testing) -RUN --mount=type=cache,target=/root/.cache/uv \ - uv pip install --system hf_transfer -ENV HF_HUB_ENABLE_HF_TRANSFER 1 - -# Copy in the v1 package for testing (it isn't distributed yet) -COPY vllm/v1 /usr/local/lib/python3.12/dist-packages/vllm/v1 - -# doc requires source code -# we hide them inside `test_docs/` , so that this source code -# will not be imported by other tests -RUN mkdir test_docs -RUN mv docs test_docs/ -RUN mv vllm test_docs/ - -#################### UNITTEST IMAGE ############################# From a550b8a29df1ba2c47cc5fc058937539b06e90ba Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 21 Apr 2025 19:53:56 -0700 Subject: [PATCH 8/8] add in ci Signed-off-by: Yang Wang --- .buildkite/test-pipeline.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index f41d15c2324e..2ab17bdb7ebc 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -8,6 +8,7 @@ # Documentation # label(str): the name of the test. emoji allowed. # fast_check(bool): whether to run this on each commit on fastcheck pipeline. +# torch_nightly(bool): whether to run this on vllm against torch nightly pipeline. # fast_check_only(bool): run this test on fastcheck pipeline only # optional(bool): never run this test by default (i.e. need to unblock manually) unless it's scheduled nightly run. # command(str): the single command to run for tests. incompatible with commands. @@ -70,6 +71,7 @@ steps: - label: Basic Correctness Test # 30min #mirror_hardwares: [amd] fast_check: true + torch_nightly: true source_file_dependencies: - vllm/ - tests/basic_correctness/test_basic_correctness @@ -104,6 +106,7 @@ steps: - label: Entrypoints Test # 40min working_dir: "/vllm-workspace/tests" fast_check: true + torch_nightly: true #mirror_hardwares: [amd] source_file_dependencies: - vllm/
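For local testing, the image referenced by the new pipeline flag can be built directly from the added Dockerfile. The invocation below is an illustrative sketch: the tag and build-arg values are examples, while the file path, target name, and ARG names come from the patches above.

    # BuildKit is required because the Dockerfile uses --mount=type=cache
    DOCKER_BUILDKIT=1 docker build \
        -f docker/Dockerfile.nightly_torch \
        --build-arg max_jobs=16 \
        --build-arg nvcc_threads=2 \
        --build-arg USE_SCCACHE=0 \
        --target test \
        -t vllm-nightly-torch:test .

Per the comment added to the pipeline header, the torch_nightly flag in .buildkite/test-pipeline.yaml only marks which steps should run in the torch-nightly pipeline; it does not change how the image itself is built.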