diff --git a/.github/workflows/image_openeuler.yml b/.github/workflows/image_openeuler.yml index 4654d4b338f..2421aa4b111 100644 --- a/.github/workflows/image_openeuler.yml +++ b/.github/workflows/image_openeuler.yml @@ -88,6 +88,8 @@ jobs: uses: docker/build-push-action@v6 with: platforms: linux/amd64,linux/arm64 + # use the current repo path as the build context to ensure .git is included + context: . # only trigger when tag, branch/main push push: ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }} labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/image_ubuntu.yml b/.github/workflows/image_ubuntu.yml index c03d6cb8f1a..5cdf076839e 100644 --- a/.github/workflows/image_ubuntu.yml +++ b/.github/workflows/image_ubuntu.yml @@ -88,6 +88,8 @@ jobs: uses: docker/build-push-action@v6 with: platforms: linux/amd64,linux/arm64 + # use the current repo path as the build context to ensure .git is included + context: . # only trigger when tag, branch/main push push: ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }} labels: ${{ steps.meta.outputs.labels }} diff --git a/Dockerfile b/Dockerfile index 1c8b87e985e..a3a3fe945da 100644 --- a/Dockerfile +++ b/Dockerfile @@ -31,16 +31,16 @@ RUN apt-get update -y && \ WORKDIR /workspace -COPY . /workspace/vllm-ascend/ +COPY . /vllm-workspace/vllm-ascend/ RUN pip config set global.index-url ${PIP_INDEX_URL} # Install vLLM ARG VLLM_REPO=https://github.com/vllm-project/vllm.git ARG VLLM_TAG=v0.8.5 -RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm +RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it. 
-RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ +RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip uninstall -y triton && \ python3 -m pip cache purge @@ -49,7 +49,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --e RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ - python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode) diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler index 2525b366a67..3fde5aa2568 100644 --- a/Dockerfile.openEuler +++ b/Dockerfile.openEuler @@ -30,15 +30,15 @@ RUN pip config set global.index-url ${PIP_INDEX_URL} WORKDIR /workspace -COPY . /workspace/vllm-ascend/ +COPY . /vllm-workspace/vllm-ascend/ # Install vLLM ARG VLLM_REPO=https://github.com/vllm-project/vllm.git ARG VLLM_TAG=v0.8.5 -RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm +RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it. 
-RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ +RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip uninstall -y triton && \ python3 -m pip cache purge @@ -46,7 +46,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /workspace/vllm/ --extr RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ - python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ + python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode)