
Commit 570d9ee

manual merge - breaks mudler#1746, manually fixing in next commit

2 parents: b994c02 + 5d10184

File tree

22 files changed: +242 −80 lines

.github/workflows/image-pr.yml (+9 −1)

@@ -59,6 +59,14 @@ jobs:
           image-type: 'extras'
           base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
           runs-on: 'arc-runner-set'
+        - build-type: 'sycl_f16'
+          platforms: 'linux/amd64'
+          tag-latest: 'false'
+          base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+          tag-suffix: 'sycl-f16-ffmpeg'
+          ffmpeg: 'true'
+          image-type: 'extras'
+          runs-on: 'arc-runner-set'
   core-image-build:
     uses: ./.github/workflows/image_build.yml
     with:
@@ -105,4 +113,4 @@ jobs:
           ffmpeg: 'true'
           image-type: 'core'
           runs-on: 'ubuntu-latest'
-          base-image: "ubuntu:22.04"
+          base-image: "ubuntu:22.04"

.github/workflows/image.yml (+16 −0)

@@ -120,6 +120,22 @@ jobs:
           image-type: 'extras'
           base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
           runs-on: 'arc-runner-set'
+        - build-type: 'sycl_f16'
+          platforms: 'linux/amd64'
+          tag-latest: 'false'
+          base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+          tag-suffix: '-sycl-f16-ffmpeg'
+          ffmpeg: 'true'
+          image-type: 'extras'
+          runs-on: 'arc-runner-set'
+        - build-type: 'sycl_f32'
+          platforms: 'linux/amd64'
+          tag-latest: 'false'
+          base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+          tag-suffix: '-sycl-f32-ffmpeg'
+          ffmpeg: 'true'
+          image-type: 'extras'
+          runs-on: 'arc-runner-set'
         # Core images
         - build-type: 'sycl_f16'
           platforms: 'linux/amd64'

Dockerfile (+20 −14)

@@ -4,6 +4,8 @@ ARG BASE_IMAGE=ubuntu:22.04
 # extras or core
 FROM ${BASE_IMAGE} as requirements-core
 
+USER root
+
 ARG GO_VERSION=1.21.7
 ARG BUILD_TYPE
 ARG CUDA_MAJOR_VERSION=11
@@ -21,7 +23,7 @@ RUN apt-get update && \
     apt-get install -y ca-certificates curl patch pip cmake git && apt-get clean
 
 # Install Go
-RUN curl -L -s https://go.dev/dl/go$GO_VERSION.linux-$TARGETARCH.tar.gz | tar -v -C /usr/local -xz
+RUN curl -L -s https://go.dev/dl/go$GO_VERSION.linux-$TARGETARCH.tar.gz | tar -C /usr/local -xz
 ENV PATH $PATH:/usr/local/go/bin
 
 COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
@@ -79,6 +81,10 @@ RUN pip install --upgrade pip
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
 RUN apt-get install -y espeak-ng espeak && apt-get clean
 
+RUN if [ ! -e /usr/bin/python ]; then \
+        ln -s /usr/bin/python3 /usr/bin/python \
+    ; fi
+
 ###################################
 ###################################
 
@@ -166,43 +172,43 @@ COPY --from=builder /build/backend-assets/grpc/stablediffusion ./backend-assets/
 
 ## Duplicated from Makefile to avoid having a big layer that's hard to push
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/autogptq \
+    make -C backend/python/autogptq \
     ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/bark \
+    make -C backend/python/bark \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/diffusers \
+    make -C backend/python/diffusers \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/vllm \
+    make -C backend/python/vllm \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/mamba \
+    make -C backend/python/mamba \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/sentencetransformers \
+    make -C backend/python/sentencetransformers \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/transformers \
+    make -C backend/python/transformers \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/vall-e-x \
+    make -C backend/python/vall-e-x \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/exllama \
+    make -C backend/python/exllama \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/exllama2 \
+    make -C backend/python/exllama2 \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/petals \
+    make -C backend/python/petals \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/transformers-musicgen \
+    make -C backend/python/transformers-musicgen \
    ; fi
 RUN if [ "${IMAGE_TYPE}" = "extras" ]; then \
-    PATH=$PATH:/opt/conda/bin make -C backend/python/coqui \
+    make -C backend/python/coqui \
    ; fi
 
 # Make sure the models directory exists

Makefile (+9 −2)

@@ -8,7 +8,7 @@ GOLLAMA_VERSION?=aeba71ee842819da681ea537e78846dc75949ac0
 
 GOLLAMA_STABLE_VERSION?=50cee7712066d9e38306eccadcfbb44ea87df4b7
 
-CPPLLAMA_VERSION?=e0843afe1b37890b631bc7d3d2da2ed36c862b91
+CPPLLAMA_VERSION?=e25fb4b18fcedb9bed6be4585cf842e9a669b28b
 
 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
@@ -28,7 +28,7 @@ BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d
 PIPER_VERSION?=d6b6275ba037dabdba4a8b65dfdf6b2a73a67f07
 
 # stablediffusion version
-STABLEDIFFUSION_VERSION?=d5d2be8e7e395c2d73ceef61e6fe8d240f2cd831
+STABLEDIFFUSION_VERSION?=362df9da29f882dbf09ade61972d16a1f53c3485
 
 # tinydream version
 TINYDREAM_VERSION?=772a9c0d9aaf768290e63cca3c904fe69faf677a
@@ -557,3 +557,10 @@ docker-image-intel:
     --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
     --build-arg GO_TAGS="none" \
     --build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
+
+docker-image-intel-xpu:
+    docker build \
+        --build-arg BASE_IMAGE=intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04 \
+        --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
+        --build-arg GO_TAGS="none" \
+        --build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .

backend/python/common-env/transformers/Makefile (+7 −0)

@@ -8,6 +8,13 @@ ifeq ($(BUILD_TYPE), hipblas)
 	CONDA_ENV_PATH = "transformers-rocm.yml"
 endif
 
+# Intel GPU are supposed to have dependencies installed in the main python
+# environment, so we skip conda installation for SYCL builds.
+# https://github.com/intel/intel-extension-for-pytorch/issues/538
+ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+export SKIP_CONDA=1
+endif
+
 .PHONY: transformers
 transformers:
 	@echo "Installing $(CONDA_ENV_PATH)..."
backend/python/common-env/transformers/install.sh (+24 −10)

@@ -1,24 +1,38 @@
 #!/bin/bash
 set -ex
 
+SKIP_CONDA=${SKIP_CONDA:-0}
+
 # Check if environment exist
 conda_env_exists(){
     ! conda list --name "${@}" >/dev/null 2>/dev/null
 }
 
-if conda_env_exists "transformers" ; then
-    echo "Creating virtual environment..."
-    conda env create --name transformers --file $1
-    echo "Virtual environment created."
-else
-    echo "Virtual environment already exists."
+if [ $SKIP_CONDA -eq 1 ]; then
+    echo "Skipping conda environment installation"
+else
+    export PATH=$PATH:/opt/conda/bin
+    if conda_env_exists "transformers" ; then
+        echo "Creating virtual environment..."
+        conda env create --name transformers --file $1
+        echo "Virtual environment created."
+    else
+        echo "Virtual environment already exists."
+    fi
 fi
 
-if [ "$PIP_CACHE_PURGE" = true ] ; then
-    export PATH=$PATH:/opt/conda/bin
+if [ -d "/opt/intel" ]; then
+    # Intel GPU: If the directory exists, we assume we are using the intel image
+    # (no conda env)
+    # https://github.com/intel/intel-extension-for-pytorch/issues/538
+    pip install intel-extension-for-transformers datasets sentencepiece tiktoken neural_speed
+fi
 
-    # Activate conda environment
-    source activate transformers
+if [ "$PIP_CACHE_PURGE" = true ] ; then
+    if [ $SKIP_CONDA -eq 0 ]; then
+        # Activate conda environment
+        source activate transformers
+    fi
 
     pip cache purge
 fi

backend/python/diffusers/Makefile (+7 −0)

@@ -4,6 +4,13 @@ ifeq ($(BUILD_TYPE), hipblas)
 export CONDA_ENV_PATH = "diffusers-rocm.yml"
 endif
 
+# Intel GPU are supposed to have dependencies installed in the main python
+# environment, so we skip conda installation for SYCL builds.
+# https://github.com/intel/intel-extension-for-pytorch/issues/538
+ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+export SKIP_CONDA=1
+endif
+
 .PHONY: diffusers
 diffusers:
 	@echo "Installing $(CONDA_ENV_PATH)..."

backend/python/diffusers/backend_diffusers.py (+19 −6)

@@ -21,21 +21,26 @@
 from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline
 from diffusers.pipelines.stable_diffusion import safety_checker
 from diffusers.utils import load_image,export_to_video
-from compel import Compel
+from compel import Compel, ReturnedEmbeddingsType
 
 from transformers import CLIPTextModel
 from safetensors.torch import load_file
 
 
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
-COMPEL=os.environ.get("COMPEL", "1") == "1"
+COMPEL=os.environ.get("COMPEL", "0") == "1"
+XPU=os.environ.get("XPU", "0") == "1"
 CLIPSKIP=os.environ.get("CLIPSKIP", "1") == "1"
 SAFETENSORS=os.environ.get("SAFETENSORS", "1") == "1"
 CHUNK_SIZE=os.environ.get("CHUNK_SIZE", "8")
 FPS=os.environ.get("FPS", "7")
 DISABLE_CPU_OFFLOAD=os.environ.get("DISABLE_CPU_OFFLOAD", "0") == "1"
 FRAMES=os.environ.get("FRAMES", "64")
 
+if XPU:
+    import intel_extension_for_pytorch as ipex
+    print(ipex.xpu.get_device_name(0))
+
 # If MAX_WORKERS are specified in the environment use it, otherwise default to 1
 MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
 
@@ -231,8 +236,13 @@ def LoadModel(self, request, context):
         if request.SchedulerType != "":
             self.pipe.scheduler = get_scheduler(request.SchedulerType, self.pipe.scheduler.config)
 
-        if not self.img2vid:
-            self.compel = Compel(tokenizer=self.pipe.tokenizer, text_encoder=self.pipe.text_encoder)
+        if COMPEL:
+            self.compel = Compel(
+                tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2 ],
+                text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],
+                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+                requires_pooled=[False, True]
+            )
 
 
         if request.ControlNet:
@@ -247,6 +257,8 @@ def LoadModel(self, request, context):
             self.pipe.to('cuda')
             if self.controlnet:
                 self.controlnet.to('cuda')
+        if XPU:
+            self.pipe = self.pipe.to("xpu")
         # Assume directory from request.ModelFile.
         # Only if request.LoraAdapter it's not an absolute path
         if request.LoraAdapter and request.ModelFile != "" and not os.path.isabs(request.LoraAdapter) and request.LoraAdapter:
@@ -386,8 +398,9 @@ def GenerateImage(self, request, context):
 
         image = {}
         if COMPEL:
-            conditioning = self.compel.build_conditioning_tensor(prompt)
-            kwargs["prompt_embeds"]= conditioning
+            conditioning, pooled = self.compel.build_conditioning_tensor(prompt)
+            kwargs["prompt_embeds"] = conditioning
+            kwargs["pooled_prompt_embeds"] = pooled
         # pass the kwargs dictionary to the self.pipe method
         image = self.pipe(
             guidance_scale=self.cfg_scale,
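For context, the new Compel wiring targets SDXL-style pipelines that expose two tokenizers and two text encoders, with pooled embeddings coming from the second encoder. A minimal sketch of that conditioning path, assuming diffusers and compel 2.x are installed (the checkpoint name and prompt below are illustrative, not taken from the commit):

from diffusers import DiffusionPipeline
from compel import Compel, ReturnedEmbeddingsType

# Illustrative SDXL checkpoint; any pipeline exposing tokenizer_2/text_encoder_2 fits.
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")

compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],  # only the second encoder contributes pooled embeddings
)

# With requires_pooled set, build_conditioning_tensor() returns an (embeds, pooled) pair,
# which is why GenerateImage now unpacks two values.
conditioning, pooled = compel.build_conditioning_tensor("a photograph of a cat++ in the snow")
image = pipe(prompt_embeds=conditioning, pooled_prompt_embeds=pooled).images[0]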

backend/python/diffusers/install.sh (+36 −10)

@@ -1,24 +1,50 @@
 #!/bin/bash
 set -ex
 
+SKIP_CONDA=${SKIP_CONDA:-0}
+
 # Check if environment exist
 conda_env_exists(){
     ! conda list --name "${@}" >/dev/null 2>/dev/null
 }
 
-if conda_env_exists "diffusers" ; then
-    echo "Creating virtual environment..."
-    conda env create --name diffusers --file $1
-    echo "Virtual environment created."
-else
-    echo "Virtual environment already exists."
+if [ $SKIP_CONDA -eq 1 ]; then
+    echo "Skipping conda environment installation"
+else
+    export PATH=$PATH:/opt/conda/bin
+    if conda_env_exists "diffusers" ; then
+        echo "Creating virtual environment..."
+        conda env create --name diffusers --file $1
+        echo "Virtual environment created."
+    else
+        echo "Virtual environment already exists."
+    fi
 fi
 
-if [ "$PIP_CACHE_PURGE" = true ] ; then
-    export PATH=$PATH:/opt/conda/bin
+if [ -d "/opt/intel" ]; then
+    # Intel GPU: If the directory exists, we assume we are using the Intel image
+    # https://github.com/intel/intel-extension-for-pytorch/issues/538
+    pip install torch==2.1.0a0 \
+                torchvision==0.16.0a0 \
+                torchaudio==2.1.0a0 \
+                intel-extension-for-pytorch==2.1.10+xpu \
+                --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+    pip install google-api-python-client \
+                grpcio \
+                grpcio-tools \
+                diffusers==0.24.0 \
+                transformers>=4.25.1 \
+                accelerate \
+                compel==2.0.2 \
+                Pillow
+fi
 
-    # Activate conda environment
-    source activate diffusers
+if [ "$PIP_CACHE_PURGE" = true ] ; then
+    if [ $SKIP_CONDA -ne 1 ]; then
+        # Activate conda environment
+        source activate diffusers
+    fi
 
     pip cache purge
 fi
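As a quick sanity check of the XPU stack installed above (a hypothetical snippet, not part of this commit), the same probe the backend performs at startup can be run inside the Intel image:

import torch
import intel_extension_for_pytorch as ipex  # registers Intel XPU support in PyTorch

print(torch.__version__)            # expected to report the pinned 2.1.0a0 build
print(ipex.xpu.get_device_name(0))  # same call backend_diffusers.py uses to print the GPU name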

backend/python/diffusers/run.sh (+9 −4)

@@ -3,10 +3,15 @@
 ##
 ## A bash script wrapper that runs the diffusers server with conda
 
-export PATH=$PATH:/opt/conda/bin
-
-# Activate conda environment
-source activate diffusers
+if [ -d "/opt/intel" ]; then
+    # Assumes we are using the Intel oneAPI container image
+    # https://github.com/intel/intel-extension-for-pytorch/issues/538
+    export XPU=1
+else
+    export PATH=$PATH:/opt/conda/bin
+    # Activate conda environment
+    source activate diffusers
+fi
 
 # get the directory where the bash script is located
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

backend/python/exllama/install.sh (+5 −0)

@@ -3,6 +3,11 @@ set -ex
 
 export PATH=$PATH:/opt/conda/bin
 
+if [ "$BUILD_TYPE" != "cublas" ]; then
+    echo "[exllama] Attention!!! Nvidia GPU is required - skipping installation"
+    exit 0
+fi
+
 # Check if environment exist
 conda_env_exists(){
     ! conda list --name "${@}" >/dev/null 2>/dev/null
