Skip to content

Commit

Permalink
Revert: Nvidia OpenCL
Browse files Browse the repository at this point in the history
  • Loading branch information
jonafeucht committed Nov 8, 2024
1 parent f9f3991 commit 2636c9b
Show file tree
Hide file tree
Showing 2 changed files with 73 additions and 31 deletions.
38 changes: 16 additions & 22 deletions Dockerfile.opencl.nvidia
Original file line number Diff line number Diff line change
@@ -1,36 +1,30 @@
FROM python:3.12.7

ARG PYTORCH_OCL_VERSION=0.2.0
ARG TORCH_VERSION=2.5
ARG PYTHON_VERSION=cp312
ARG PLATFORM=linux_x86_64

ENV WHL_FILE=pytorch_ocl-${PYTORCH_OCL_VERSION}+torch${TORCH_VERSION}-${PYTHON_VERSION}-none-${PLATFORM}.whl
ENV WHL_URL=https://github.com/artyom-beilis/pytorch_dlprim/releases/download/${PYTORCH_OCL_VERSION}/${WHL_FILE}

ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV DEBIAN_FRONTEND="noninteractive"

WORKDIR /app

RUN apt update && \
apt full-upgrade -y && \
apt install python3 python3-full python3-pip python3-venv git wget ocl-icd-opencl-dev opencl-clhpp-headers opencl-c-headers opencl-headers ocl-icd-libopencl1 clinfo -y && \
python3 -m venv /app/venv && \
/app/venv/bin/pip install --upgrade pip

# Configure OpenCL ICD loaders
RUN mkdir -p /etc/OpenCL/vendors && \
echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
apt install python3 python3-dev git ocl-icd-opencl-dev opencl-clhpp-headers opencl-c-headers ocl-icd-libopencl1 clinfo -y && \
pip3 install --upgrade pip

COPY requirements.txt /app/requirements.txt
RUN /app/venv/bin/pip install --no-cache-dir -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
RUN pip3 install --no-cache-dir -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu

# Install OpenCL backend for PyTorch
RUN wget ${WHL_URL} && \
/app/venv/bin/pip install ${WHL_FILE} && \
rm ${WHL_FILE}
RUN wget https://github.com/artyom-beilis/pytorch_dlprim/releases/download/0.2.0/pytorch_ocl-0.2.0+torch2.5-cp312-none-linux_x86_64.whl && \
pip3 install pytorch_ocl-0.2.0+torch2.5-cp312-none-linux_x86_64.whl && \
rm pytorch_ocl-0.2.0+torch2.5-cp312-none-linux_x86_64.whl

# Install Nvidia OpenCL drivers
RUN mkdir -p /etc/OpenCL/vendors && \
echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,display

RUN rm -f /etc/OpenCL/vendors/mesa.icd

COPY . /app

CMD ["/app/venv/bin/python", "-m", "fastapi", "run", "main.py", "--proxy-headers", "--host", "0.0.0.0", "--port", "8000"]
CMD ["fastapi", "run", "main.py", "--proxy-headers", "--host", "0.0.0.0", "--port", "8000"]
66 changes: 57 additions & 9 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,16 @@

- For ease of use it's recommended to use the provided [docker-compose.yml](https://github.com/doppeltilde/image_video_classification/blob/main/docker-compose.yml).

**CPU Support:** Use the `latest` tag.
### **CPU Support**
Use the `latest` tag.
```yml
services:
image_video_classification:
image: ghcr.io/doppeltilde/image_video_classification:latest
ports:
- "8000:8000"
volumes:
- models:/root/.cache/huggingface/hub:rw
- ./models:/root/.cache/huggingface/hub:rw
environment:
- DEFAULT_MODEL_NAME
- BATCH_SIZE
Expand All @@ -26,20 +27,18 @@ services:
- USE_API_KEYS
- API_KEYS
restart: unless-stopped

volumes:
models:
```
**NVIDIA GPU Support:** Use the `latest-cuda` tag.
### **NVIDIA GPU Support**
**CUDA:**
```yml
services:
image_video_classification_cuda:
image: ghcr.io/doppeltilde/image_video_classification:latest-cuda
ports:
- "8000:8000"
volumes:
- models:/root/.cache/huggingface/hub:rw
- ./models:/root/.cache/huggingface/hub:rw
environment:
- DEFAULT_MODEL_NAME
- BATCH_SIZE
Expand All @@ -55,9 +54,58 @@ services:
- driver: nvidia
count: all
capabilities: [ gpu ]
```
**OpenCL:**
```yml
services:
image_video_classification_opencl:
image: ghcr.io/doppeltilde/image_video_classification:latest-opencl-nvidia
ports:
- "8000:8000"
volumes:
- ./models:/root/.cache/huggingface/hub:rw
environment:
- DEFAULT_MODEL_NAME
- BATCH_SIZE
- ACCESS_TOKEN
- DEFAULT_SCORE
- USE_API_KEYS
- API_KEYS
restart: unless-stopped
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [ gpu ]
```
volumes:
models:
### **AMD GPU Support**
**OpenCL:**
```yml
services:
  image_video_classification_opencl_amd:
    # NOTE(review): tag inferred from the sibling `latest-opencl-nvidia` tag — confirm it exists
    image: ghcr.io/doppeltilde/image_video_classification:latest-opencl-amd
    ports:
      - "8000:8000"
    volumes:
      - ./models:/root/.cache/huggingface/hub:rw
    environment:
      - DEFAULT_MODEL_NAME
      - BATCH_SIZE
      - ACCESS_TOKEN
      - DEFAULT_SCORE
      - USE_API_KEYS
      - API_KEYS
    restart: unless-stopped
    # AMD GPUs are exposed to the container via device passthrough,
    # not the NVIDIA device driver reservation used in the CUDA example.
    devices:
      - /dev/kfd
      - /dev/dri
```
- Create a `.env` file and set the preferred values.
Expand Down

0 comments on commit 2636c9b

Please sign in to comment.