@@ -11,7 +11,7 @@ It is recommended to use uv to install the dependencies for faster installation:
```bash
pip install --upgrade pip
pip install uv
- uv pip install "sglang[all]>=0.4.6.post4"
+ uv pip install "sglang[all]>=0.4.6.post5"
```

**Quick Fixes to Common Problems**
@@ -29,7 +29,7 @@ uv pip install "sglang[all]>=0.4.6.post4"
```bash
# Use the last release branch
- git clone -b v0.4.6.post4 https://github.com/sgl-project/sglang.git
+ git clone -b v0.4.6.post5 https://github.com/sgl-project/sglang.git
cd sglang

pip install --upgrade pip
@@ -44,7 +44,7 @@ Note: For AMD ROCm system with Instinct/MI GPUs, do following instead:
```bash
# Use the last release branch
- git clone -b v0.4.6.post4 https://github.com/sgl-project/sglang.git
+ git clone -b v0.4.6.post5 https://github.com/sgl-project/sglang.git
cd sglang

pip install --upgrade pip
@@ -73,7 +73,7 @@ docker run --gpus all \
Note: For AMD ROCm systems with Instinct/MI GPUs, it is recommended to use `docker/Dockerfile.rocm` to build images; example build and usage commands are shown below:

```bash
- docker build --build-arg SGL_BRANCH=v0.4.6.post4 -t v0.4.6.post4-rocm630 -f Dockerfile.rocm .
+ docker build --build-arg SGL_BRANCH=v0.4.6.post5 -t v0.4.6.post5-rocm630 -f Dockerfile.rocm .

alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/dri --ipc=host \
--shm-size 16G --group-add video --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
@@ -82,11 +82,11 @@ alias drun='docker run -it --rm --network=host --device=/dev/kfd --device=/dev/d
drun -p 30000:30000 \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HF_TOKEN=<secret>" \
- v0.4.6.post4-rocm630 \
+ v0.4.6.post5-rocm630 \
python3 -m sglang.launch_server --model-path meta-llama/Llama-3.1-8B-Instruct --host 0.0.0.0 --port 30000

# Till flashinfer backend available, --attention-backend triton --sampling-backend pytorch are set by default
- drun v0.4.6.post4-rocm630 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
+ drun v0.4.6.post5-rocm630 python3 -m sglang.bench_one_batch --batch-size 32 --input 1024 --output 128 --model amd/Meta-Llama-3.1-8B-Instruct-FP8-KV --tp 8 --quantization fp8
```
## Method 4: Using docker compose
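The docker compose method is only introduced by this heading within the hunk; its steps fall outside the diff. As a hedged sketch (assuming the repository ships a compose file under `docker/`, which this diff does not confirm, and whose exact path and defaults may differ per release), the workflow would look roughly like this:

```bash
# Rough sketch only: the compose file location (docker/ in the repo) and its
# defaults are assumptions and may vary between releases -- check the repo first.
git clone -b v0.4.6.post5 https://github.com/sgl-project/sglang.git
cd sglang/docker

# Adjust the compose file first (model path, GPU visibility, HF_TOKEN), then:
docker compose up -d

# Follow the server logs to confirm the server started
docker compose logs -f
```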