@@ -16,7 +16,7 @@ Note: Please check the [FlashInfer installation doc](https://docs.flashinfer.ai/
## Method 2: From source
```
# Use the latest release branch
- git clone -b v0.3.5 https://github.com/sgl-project/sglang.git
+ git clone -b v0.3.5.post1 https://github.com/sgl-project/sglang.git
cd sglang

pip install --upgrade pip
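# The block typically continues with an editable install of the bundled
# python package (a sketch, assuming the v0.3.5-era layout; the exact
# extras may differ by branch):
pip install -e "python[all]"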
@@ -46,7 +46,7 @@ docker run --gpus all \
Note: For AMD ROCm systems with Instinct/MI GPUs, it is recommended to use `docker/Dockerfile.rocm` to build images; example usage is shown below:

```bash
- docker build --build-arg SGL_BRANCH=v0.3.5 -t v0.3.5-rocm620 -f Dockerfile.rocm .
+ docker build --build-arg SGL_BRANCH=v0.3.5.post1 -t v0.3.5.post1-rocm620 -f Dockerfile.rocm .

alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/dri --ipc=host \
--shm-size 16G --group-add video --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
@@ -55,11 +55,11 @@ alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/d
drun -p 30000:30000 \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HF_TOKEN=<secret>" \
- v0.3.5-rocm620 \
+ v0.3.5.post1-rocm620 \
python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --host 0.0.0.0 --port 30000

# Until the flashinfer backend is available, --attention-backend triton and --sampling-backend pytorch are set by default
- drun v0.3.5-rocm620 python3 -m sglang.bench_latency --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
+ drun v0.3.5.post1-rocm620 python3 -m sglang.bench_latency --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
```
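
Once the container is running, a quick request against the launched server confirms the install end to end. A minimal sketch, assuming the `launch_server` example above is listening on `localhost:30000` and using SGLang's native `/generate` endpoint:

```bash
# Send one short generation request to the server started above.
# Adjust host/port if you changed them in launch_server.
curl -s http://localhost:30000/generate \
  -H "Content-Type: application/json" \
  -d '{"text": "The capital of France is", "sampling_params": {"max_new_tokens": 16, "temperature": 0}}'
```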
## Method 4: Using docker compose