diff --git a/docker/Dockerfile.rocm b/docker/Dockerfile.rocm
index 8b5a3992d40..ca7625f7db9 100644
--- a/docker/Dockerfile.rocm
+++ b/docker/Dockerfile.rocm
@@ -1,5 +1,5 @@
 # Usage (to build SGLang ROCm docker image):
-# docker build --build-arg SGL_BRANCH=v0.4.5 -t v0.4.5-rocm630 -f Dockerfile.rocm .
+# docker build --build-arg SGL_BRANCH=v0.4.5.post1 -t v0.4.5.post1-rocm630 -f Dockerfile.rocm .
 
 # default base image
 ARG BASE_IMAGE="rocm/sgl-dev:vllm20250114"
diff --git a/docs/developer/setup_github_runner.md b/docs/developer/setup_github_runner.md
index eefe2dc6c20..cd852f3ea3b 100644
--- a/docs/developer/setup_github_runner.md
+++ b/docs/developer/setup_github_runner.md
@@ -11,9 +11,9 @@ docker pull nvidia/cuda:12.1.1-devel-ubuntu22.04
 # Nvidia
 docker run --shm-size 128g -it -v /tmp/huggingface:/hf_home --gpus all nvidia/cuda:12.1.1-devel-ubuntu22.04 /bin/bash
 # AMD
-docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.5-rocm630 /bin/bash
+docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.5.post1-rocm630 /bin/bash
 # AMD just the last 2 GPUs
-docker run --rm --device=/dev/kfd --device=/dev/dri/renderD176 --device=/dev/dri/renderD184 --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.5-rocm630 /bin/bash
+docker run --rm --device=/dev/kfd --device=/dev/dri/renderD176 --device=/dev/dri/renderD184 --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.5.post1-rocm630 /bin/bash
 ```
 
 ### Step 2: Configure the runner by `config.sh`
diff --git a/docs/start/install.md b/docs/start/install.md
index fb3b9f1001d..b5a40ee3a32 100644
--- a/docs/start/install.md
+++ b/docs/start/install.md
@@ -11,7 +11,7 @@ It is recommended to use uv to install the dependencies for faster installation:
 ```bash
 pip install --upgrade pip
 pip install uv
-uv pip install "sglang[all]>=0.4.5" --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python
+uv pip install "sglang[all]>=0.4.5.post1" --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer-python
 ```
 
 **Quick Fixes to Common Problems**
@@ -29,7 +29,7 @@ uv pip install "sglang[all]>=0.4.5" --find-links https://flashinfer.ai/whl/cu124
 
 ```bash
 # Use the last release branch
-git clone -b v0.4.5 https://github.com/sgl-project/sglang.git
+git clone -b v0.4.5.post1 https://github.com/sgl-project/sglang.git
 cd sglang
 
 pip install --upgrade pip
@@ -44,7 +44,7 @@ Note: For AMD ROCm system with Instinct/MI GPUs, do following instead:
 
 ```bash
 # Use the last release branch
-git clone -b v0.4.5 https://github.com/sgl-project/sglang.git
+git clone -b v0.4.5.post1 https://github.com/sgl-project/sglang.git
 cd sglang
 
 pip install --upgrade pip
@@ -73,7 +73,7 @@ docker run --gpus all \
 Note: For AMD ROCm system with Instinct/MI GPUs, it is recommended to use `docker/Dockerfile.rocm` to build images, example and usage as below:
 
 ```bash
-docker build --build-arg SGL_BRANCH=v0.4.5 -t v0.4.5-rocm630 -f Dockerfile.rocm .
+docker build --build-arg SGL_BRANCH=v0.4.5.post1 -t v0.4.5.post1-rocm630 -f Dockerfile.rocm .
 
 alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/dri --ipc=host \
     --shm-size 16G --group-add video --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
@@ -82,11 +82,11 @@ alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/d
 drun -p 30000:30000 \
   -v ~/.cache/huggingface:/root/.cache/huggingface \
   --env "HF_TOKEN=<secret>" \
-  v0.4.5-rocm630 \
+  v0.4.5.post1-rocm630 \
   python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --host 0.0.0.0 --port 30000
 
 # Till flashinfer backend available, --attention-backend triton --sampling-backend pytorch are set by default
-drun v0.4.5-rocm630 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
+drun v0.4.5.post1-rocm630 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
 ```
 
 ## Method 4: Using docker compose
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 5003358c57d..0b22b8cb982 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sglang"
-version = "0.4.5"
+version = "0.4.5.post1"
 description = "SGLang is yet another fast serving framework for large language models and vision language models."
 readme = "README.md"
 requires-python = ">=3.8"
diff --git a/python/sglang/version.py b/python/sglang/version.py
index 98a433b3105..f4a80302345 100644
--- a/python/sglang/version.py
+++ b/python/sglang/version.py
@@ -1 +1 @@
-__version__ = "0.4.5"
+__version__ = "0.4.5.post1"
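A quick post-merge sanity check (not part of the patch above; a minimal sketch that assumes the package has been installed locally from this branch): `__version__` comes straight from the `python/sglang/version.py` change, and `pip show` reflects the `python/pyproject.toml` metadata.

```bash
# Print the version string defined in python/sglang/version.py; expect 0.4.5.post1
python3 -c "from sglang.version import __version__; print(__version__)"

# Cross-check the installed package metadata from python/pyproject.toml
pip show sglang | grep ^Version
```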