From 0b59111405b27348b8c068bc15538e575d04aa79 Mon Sep 17 00:00:00 2001
From: zhyncs
Date: Mon, 23 Jun 2025 22:33:32 -0700
Subject: [PATCH] chore: bump v0.4.8

---
 benchmark/deepseek_v3/README.md        |  2 +-
 docker/Dockerfile.rocm                 |  2 +-
 docs/references/setup_github_runner.md |  4 ++--
 docs/start/install.md                  | 12 ++++++------
 python/pyproject.toml                  |  2 +-
 python/sglang/version.py               |  2 +-
 6 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/benchmark/deepseek_v3/README.md b/benchmark/deepseek_v3/README.md
index a2b0241f1d0..6a407c3ecab 100644
--- a/benchmark/deepseek_v3/README.md
+++ b/benchmark/deepseek_v3/README.md
@@ -33,7 +33,7 @@ Add [performance optimization options](#performance-optimization-options) as nee
 
 ```bash
 # Installation
-pip install "sglang[all]>=0.4.7.post1"
+pip install "sglang[all]>=0.4.8"
 
 # Launch
 python3 -m sglang.launch_server --model deepseek-ai/DeepSeek-V3 --tp 8 --trust-remote-code
diff --git a/docker/Dockerfile.rocm b/docker/Dockerfile.rocm
index f33de182d95..7f95410cb37 100644
--- a/docker/Dockerfile.rocm
+++ b/docker/Dockerfile.rocm
@@ -1,5 +1,5 @@
 # Usage (to build SGLang ROCm docker image):
-# docker build --build-arg SGL_BRANCH=v0.4.7.post1 -t v0.4.7.post1-rocm630 -f Dockerfile.rocm .
+# docker build --build-arg SGL_BRANCH=v0.4.8 -t v0.4.8-rocm630 -f Dockerfile.rocm .
 
 # default base image
 ARG BASE_IMAGE="rocm/sgl-dev:vllm20250114"
diff --git a/docs/references/setup_github_runner.md b/docs/references/setup_github_runner.md
index b00c79b744a..fce520ebb8a 100644
--- a/docs/references/setup_github_runner.md
+++ b/docs/references/setup_github_runner.md
@@ -11,9 +11,9 @@ docker pull nvidia/cuda:12.1.1-devel-ubuntu22.04
 # Nvidia
 docker run --shm-size 128g -it -v /tmp/huggingface:/hf_home --gpus all nvidia/cuda:12.1.1-devel-ubuntu22.04 /bin/bash
 # AMD
-docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.7.post1-rocm630 /bin/bash
+docker run --rm --device=/dev/kfd --device=/dev/dri --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.8-rocm630 /bin/bash
 # AMD just the last 2 GPUs
-docker run --rm --device=/dev/kfd --device=/dev/dri/renderD176 --device=/dev/dri/renderD184 --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.7.post1-rocm630 /bin/bash
+docker run --rm --device=/dev/kfd --device=/dev/dri/renderD176 --device=/dev/dri/renderD184 --group-add video --shm-size 128g -it -v /tmp/huggingface:/hf_home lmsysorg/sglang:v0.4.8-rocm630 /bin/bash
 ```
 
 ### Step 2: Configure the runner by `config.sh`
diff --git a/docs/start/install.md b/docs/start/install.md
index bae1abd909a..5b98ff41781 100644
--- a/docs/start/install.md
+++ b/docs/start/install.md
@@ -11,7 +11,7 @@ It is recommended to use uv to install the dependencies for faster installation:
 ```bash
 pip install --upgrade pip
 pip install uv
-uv pip install "sglang[all]>=0.4.7.post1"
+uv pip install "sglang[all]>=0.4.8"
 ```
 
 **Quick Fixes to Common Problems**
@@ -27,7 +27,7 @@
 
 ```bash
 # Use the last release branch
-git clone -b v0.4.7.post1 https://github.com/sgl-project/sglang.git
+git clone -b v0.4.8 https://github.com/sgl-project/sglang.git
 cd sglang
 
 pip install --upgrade pip
@@ -42,7 +42,7 @@ Note: For AMD ROCm system with Instinct/MI GPUs, do following instead:
 
 ```bash
 # Use the last release branch
-git clone -b v0.4.7.post1 https://github.com/sgl-project/sglang.git
+git clone -b v0.4.8 https://github.com/sgl-project/sglang.git
 cd sglang
 
 pip install --upgrade pip
@@ -71,7 +71,7 @@ docker run --gpus all \
 Note: For AMD ROCm system with Instinct/MI GPUs, it is recommended to use `docker/Dockerfile.rocm` to build images, example and usage as below:
 
 ```bash
-docker build --build-arg SGL_BRANCH=v0.4.7.post1 -t v0.4.7.post1-rocm630 -f Dockerfile.rocm .
+docker build --build-arg SGL_BRANCH=v0.4.8 -t v0.4.8-rocm630 -f Dockerfile.rocm .
 
 alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/dri --ipc=host \
     --shm-size 16G --group-add video --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
@@ -80,11 +80,11 @@ alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/d
 drun -p 30000:30000 \
     -v ~/.cache/huggingface:/root/.cache/huggingface \
     --env "HF_TOKEN=<secret>" \
-    v0.4.7.post1-rocm630 \
+    v0.4.8-rocm630 \
     python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --host 0.0.0.0 --port 30000
 
 # Till flashinfer backend available, --attention-backend triton --sampling-backend pytorch are set by default
-drun v0.4.7.post1-rocm630 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
+drun v0.4.8-rocm630 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
 ```
 
 ## Method 4: Using docker compose
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 61ee9b7517e..e6c5a6c8de6 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sglang"
-version = "0.4.7.post1"
+version = "0.4.8"
 description = "SGLang is yet another fast serving framework for large language models and vision language models."
 readme = "README.md"
 requires-python = ">=3.8"
diff --git a/python/sglang/version.py b/python/sglang/version.py
index 645f4365d2d..a3a9bd54437 100644
--- a/python/sglang/version.py
+++ b/python/sglang/version.py
@@ -1 +1 @@
-__version__ = "0.4.7.post1"
+__version__ = "0.4.8"
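
A quick way to confirm that the bumped release is the one actually loaded after installing or checking out this branch is to read the `__version__` string that the patch sets in `python/sglang/version.py`. The check below is a minimal sketch, assuming the `sglang` package built from this branch is importable in the current environment; the `EXPECTED` constant is an illustrative name introduced here, not part of the project.

```python
# Minimal sketch: confirm the installed SGLang matches the bumped release.
# Assumes `sglang` is importable; `sglang.version.__version__` is the string
# this patch updates from "0.4.7.post1" to "0.4.8".
from sglang.version import __version__

EXPECTED = "0.4.8"  # illustrative constant, not defined by the project

if __version__ != EXPECTED:
    raise SystemExit(f"Expected sglang {EXPECTED}, found {__version__}")
print(f"sglang {__version__} is installed")
```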