# Example: PD-Disaggregated LLM Inference with NVIDIA Dynamo SGLang Runtime (v1alpha2)
# This example demonstrates a complete Prefill-Decode disaggregated deployment
# using NVIDIA Dynamo SGLang runtime with Qwen 0.6B model.
#
# Architecture:
# - processor: Dynamo frontend for request routing and HTTP API
# - prefill: SGLang prefill engine (tp-size=1, with rolloutStrategy)
# - decode: SGLang decode engine (tp-size=1)
#
# Model: Qwen/Qwen3-0.6B (lightweight model suitable for testing PD disaggregation)
#
# Features demonstrated:
# - NVIDIA Dynamo SGLang runtime with PD disaggregation
# - standalonePattern for independent pod-per-instance deployment
# - RoleTemplates for reducing configuration duplication
# - rolloutStrategy with InPlaceIfPossible (prefill only)
#
# Prerequisites:
# - NVIDIA Dynamo SGLang runtime image: nvcr.io/nvidia/ai-dynamo/sglang-runtime:1.0.1
# - GPU nodes with CUDA support
# - ETCD and NATS services deployed (required for Dynamo runtime)
# - Model weights mounted at /models or use HuggingFace download
---
# RoleBasedGroup: one group, three roles (processor / prefill / decode),
# implementing Prefill-Decode disaggregated serving on the Dynamo SGLang runtime.
# NOTE(review): leading indentation was lost in the copied source; nesting below
# is reconstructed from the RoleBasedGroup v1alpha2 field names and the standard
# PodSpec schema — verify against the installed CRD before applying.
apiVersion: workloads.x-k8s.io/v1alpha2
kind: RoleBasedGroup
metadata:
  name: dynamo-pd-inference
  labels:
    app: dynamo-sglang-inference
    deployment-type: pd-disaggregated
    model: qwen-0.6b
    runtime: dynamo-sglang
spec:
  # Define reusable templates shared by multiple roles.
  roleTemplates:
    - name: dynamo-base
      template:
        metadata:
          labels:
            app: dynamo-sglang-inference
        spec:
          containers:
            - name: sglang
              image: nvcr.io/nvidia/ai-dynamo/sglang-runtime:1.0.1
              env:
                # Both namespace variables carry the same value; presumably one
                # is the legacy spelling — confirm which the runtime reads.
                # Must match across all roles so workers discover each other.
                - name: DYN_NAMESPACE
                  value: default
                - name: DYNAMO_NAMESPACE
                  value: default
                # Dynamo control plane: ETCD for discovery, NATS for messaging
                # (both listed as prerequisites in the file header).
                - name: ETCD_ENDPOINTS
                  value: http://etcd:2379
                - name: NATS_SERVER
                  value: nats://nats:4222
  roles:
    # Processor: Dynamo frontend for request routing and HTTP API.
    - name: processor
      replicas: 1
      standalonePattern:
        templateRef:
          name: dynamo-base
        patch:
          spec:
            containers:
              - name: sglang
                command:
                  - python3
                  - -m
                  - dynamo.frontend
                args:
                  - --http-port
                  - "8000"
                ports:
                  - name: http
                    containerPort: 8000
                  - name: metrics
                    containerPort: 9090
                # Generous initial delays: the frontend needs ETCD/NATS and
                # worker registration before /health can report ready.
                readinessProbe:
                  httpGet:
                    path: /health
                    port: 8000
                  initialDelaySeconds: 60
                  periodSeconds: 10
                livenessProbe:
                  httpGet:
                    path: /health
                    port: 8000
                  initialDelaySeconds: 120
                  periodSeconds: 30
                resources:
                  requests:
                    cpu: "2"
                    memory: "8Gi"
                  limits:
                    cpu: "4"
                    memory: "16Gi"
    # Prefill: SGLang prefill engine (tp-size=1, with rolloutStrategy).
    # Uses --disaggregation-mode=prefill for PD disaggregation.
    - name: prefill
      replicas: 1
      restartPolicy: RecreateRoleInstanceOnPodRestart
      scalingAdapter:
        enable: true
      # NOTE(review): the rollingUpdate sub-structure (inner `type` and
      # `inPlaceUpdateStrategy`) is inferred from key order in the original;
      # confirm against the RoleBasedGroup CRD's rolloutStrategy schema.
      rolloutStrategy:
        type: RollingUpdate
        rollingUpdate:
          type: InPlaceIfPossible
          maxUnavailable: 1
          inPlaceUpdateStrategy:
            gracePeriodSeconds: 30
      standalonePattern:
        templateRef:
          name: dynamo-base
        patch:
          spec:
            # Memory-backed /dev/shm: SGLang needs large shared memory segments.
            volumes:
              - name: dshm
                emptyDir:
                  medium: Memory
                  sizeLimit: 30Gi
            containers:
              - name: sglang
                command:
                  - python3
                  - -m
                  - dynamo.sglang
                args:
                  - --model-path
                  - Qwen/Qwen3-0.6B
                  - --served-model-name
                  - qwen3
                  - --tp-size
                  - "1"
                  - --trust-remote-code
                  - --skip-tokenizer-init
                  - --disaggregation-mode
                  - prefill
                  # KV-cache transfer between prefill and decode uses NIXL;
                  # bootstrap port must agree with the decode role below.
                  - --disaggregation-transfer-backend
                  - nixl
                  - --disaggregation-bootstrap-port
                  - "30001"
                  - --host
                  - "0.0.0.0"
                resources:
                  requests:
                    cpu: "4"
                    memory: "16Gi"
                    nvidia.com/gpu: "1"
                  limits:
                    cpu: "8"
                    memory: "32Gi"
                    nvidia.com/gpu: "1"
                volumeMounts:
                  - name: dshm
                    mountPath: /dev/shm
    # Decode: SGLang decode engine (tp-size=1).
    # Uses --disaggregation-mode=decode for PD disaggregation; otherwise
    # mirrors the prefill role (same image, resources, shm volume).
    - name: decode
      replicas: 1
      restartPolicy: RecreateRoleInstanceOnPodRestart
      scalingAdapter:
        enable: true
      standalonePattern:
        templateRef:
          name: dynamo-base
        patch:
          spec:
            volumes:
              - name: dshm
                emptyDir:
                  medium: Memory
                  sizeLimit: 30Gi
            containers:
              - name: sglang
                command:
                  - python3
                  - -m
                  - dynamo.sglang
                args:
                  - --model-path
                  - Qwen/Qwen3-0.6B
                  - --served-model-name
                  - qwen3
                  - --tp-size
                  - "1"
                  - --trust-remote-code
                  - --skip-tokenizer-init
                  - --disaggregation-mode
                  - decode
                  - --disaggregation-transfer-backend
                  - nixl
                  - --disaggregation-bootstrap-port
                  - "30001"
                  - --host
                  - "0.0.0.0"
                resources:
                  requests:
                    cpu: "4"
                    memory: "16Gi"
                    nvidia.com/gpu: "1"
                  limits:
                    cpu: "8"
                    memory: "32Gi"
                    nvidia.com/gpu: "1"
                volumeMounts:
                  - name: dshm
                    mountPath: /dev/shm
---
# ClusterIP Service exposing only the processor (frontend) role's HTTP API.
# Selector targets the per-role labels stamped on pods by the RoleBasedGroup
# controller, so traffic never reaches prefill/decode workers directly.
apiVersion: v1
kind: Service
metadata:
  name: dynamo-pd-inference-service
  # Labels added for consistency with the RoleBasedGroup's metadata above.
  labels:
    app: dynamo-sglang-inference
    runtime: dynamo-sglang
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 8000
      protocol: TCP
      targetPort: 8000
  selector:
    rolebasedgroup.workloads.x-k8s.io/name: dynamo-pd-inference
    rolebasedgroup.workloads.x-k8s.io/role: processor