
Commit e279f43

fix: go lint + test with wrong poolName
- if e2e is only tested on Kind with the specific guide gaie-sim, then make that clear and fix the label values:
  - llm-d.ai/guide: simulated-accelerators
  - llm-d.ai/model: random

Signed-off-by: Wen Zhou <wenzhou@redhat.com>
1 parent 3630976 commit e279f43

6 files changed: 40 additions & 27 deletions


docs/developer-guide/testing.md

Lines changed: 5 additions & 3 deletions
@@ -8,7 +8,7 @@ WVA has a multi-layered testing strategy:
 
 1. **Unit Tests** - Fast, isolated tests for individual packages and functions
 2. **Integration Tests** - Tests for component interactions within the controller
-3. **E2E Tests** - Environment-agnostic end-to-end tests (Kind emulated or OpenShift), with smoke and full tiers
+3. **E2E Tests** - End-to-end tests on Kind clusters with emulated GPUs, with smoke and full tiers
 
 ## Unit Tests
 

@@ -134,10 +134,12 @@ var _ = AfterSuite(func() {
 
 ## End-to-End Tests
 
-WVA provides a **single consolidated E2E suite** that runs on multiple environments (Kind with emulated GPUs, or OpenShift/kubernetes with real infrastructure). Tests are environment-agnostic and parameterized via environment variables; they create VA, HPA, and model services dynamically as part of the test workflow.
+WVA provides a **single consolidated E2E suite** that runs on Kind clusters with emulated GPUs. Tests create VA, HPA, and model services dynamically as part of the test workflow.
+
+> **Note**: E2E tests are only supported on Kind clusters (`ENVIRONMENT=kind-emulator`). The test fixtures and labels are configured specifically for the Kind emulator deployment which uses the `simulated-accelerators` guide from llm-d. Running E2E tests on other environments (OpenShift, generic Kubernetes) is not supported for now.
 
 - **Location**: `test/e2e/`
-- **Environments**: Kind (emulated), OpenShift, or generic Kubernetes
+- **Supported Environment**: Kind (emulated GPUs only)
 - **Tiers**: Smoke (~5–10 min) for PRs; full suite (~15–25 min) for comprehensive validation
 
 ### Infra-Only Setup (Required Before Running Tests)
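Since the suite is now Kind-only, a guard at suite setup could fail fast instead of surfacing confusing label mismatches later. The sketch below is illustrative only and not part of this commit; it assumes the Ginkgo v2 suite under `test/e2e/` and the `ENVIRONMENT=kind-emulator` variable described in the note above.

```go
package e2e

import (
    "os"

    . "github.com/onsi/ginkgo/v2"
)

// Hypothetical guard (not from this commit): skip the whole suite unless it
// targets the supported Kind emulator environment.
var _ = BeforeSuite(func() {
    if env := os.Getenv("ENVIRONMENT"); env != "kind-emulator" {
        Skip("E2E tests are only supported on Kind (ENVIRONMENT=kind-emulator), got: " + env)
    }
})
```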

internal/controller/configmap_bootstrap.go

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ func (r *ConfigMapReconciler) BootstrapInitialConfigMaps(ctx context.Context) er
 continue // Skip excluded namespaces. Only for all-namespaces mode.
 }
 }
-
+
 }
 if ns.Labels != nil {
 if value, ok := ns.Labels[constants.NamespaceConfigEnabledLabelKey]; ok {

test/e2e/fixtures/infra_builder.go

Lines changed: 3 additions & 3 deletions
@@ -60,14 +60,14 @@ func buildService(namespace, name, appLabel string, port int) *corev1.Service {
 Name: serviceName,
 Namespace: namespace,
 Labels: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
-"test-resource": "true",
+"test-resource":              "true",
 },
 },
 Spec: corev1.ServiceSpec{
 Selector: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
 },
 Ports: []corev1.ServicePort{

test/e2e/fixtures/model_service_builder.go

Lines changed: 18 additions & 9 deletions
@@ -86,12 +86,20 @@ func buildModelServiceDeployment(namespace, name, poolName, modelID string, useS
 image = "ghcr.io/llm-d/llm-d-cuda-dev:latest"
 }
 args := buildModelServerArgs(modelID, useSimulator, maxNumSeqs)
+// Labels must include all labels from the infrastructure InferencePool's selector.
+// For kind-emulator (gaie-sim), the selector uses:
+// - llm-d.ai/inference-serving: "true"
+// - llm-d.ai/guide: "simulated-accelerators"
+// - llm-d.ai/accelerator-variant: "cpu"
+// - llm-d.ai/model: "random"
 labels := map[string]string{
-"app": appLabel,
-"llm-d.ai/inference-serving": "true",
-"llm-d.ai/model": "ms-sim-llm-d-modelservice",
-"llm-d.ai/model-pool": poolName,
-"test-resource": "true",
+"app":                          appLabel,
+"llm-d.ai/inference-serving":   "true",
+"llm-d.ai/guide":               poolName,
+"llm-d.ai/accelerator-variant": "cpu",
+"llm-d.ai/model":               "random",
+"llm-d.ai/model-pool":          poolName,
+"test-resource":                "true",
 }
 
 envVars := []corev1.EnvVar{

@@ -135,10 +135,11 @@
 Replicas: ptr.To(int32(1)),
 Selector: &metav1.LabelSelector{
 MatchLabels: map[string]string{
-"app": appLabel,
-"llm-d.ai/inference-serving": "true",
-"llm-d.ai/model": "ms-sim-llm-d-modelservice",
-"llm-d.ai/model-pool": poolName,
+"app":                          appLabel,
+"llm-d.ai/inference-serving":   "true",
+"llm-d.ai/guide":               poolName,
+"llm-d.ai/accelerator-variant": "cpu",
+"llm-d.ai/model":               "random", // to match simulator model
 },
 },
 Template: corev1.PodTemplateSpec{
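The comment added in this hunk states the contract behind the fix: the model-service pods are only selected by the InferencePool if their labels are a superset of the pool's selector. Below is a minimal, self-contained sketch of that rule using the selector values quoted in the new comment; the helper name and the example labels are illustrative and not code from the repository.

```go
package main

import "fmt"

// labelsCoverSelector reports whether podLabels contains every key/value pair
// from the pool selector. This is the property the new comment asks for; the
// helper itself is illustrative and not part of the repository.
func labelsCoverSelector(podLabels, poolSelector map[string]string) bool {
    for k, want := range poolSelector {
        if got, ok := podLabels[k]; !ok || got != want {
            return false
        }
    }
    return true
}

func main() {
    // Selector values quoted from the new comment for the kind-emulator (gaie-sim) pool.
    poolSelector := map[string]string{
        "llm-d.ai/inference-serving":   "true",
        "llm-d.ai/guide":               "simulated-accelerators",
        "llm-d.ai/accelerator-variant": "cpu",
        "llm-d.ai/model":               "random",
    }
    // Pod labels as built by buildModelServiceDeployment after this change, with
    // poolName == "simulated-accelerators" and an example app label.
    podLabels := map[string]string{
        "app":                          "example-app",
        "llm-d.ai/inference-serving":   "true",
        "llm-d.ai/guide":               "simulated-accelerators",
        "llm-d.ai/accelerator-variant": "cpu",
        "llm-d.ai/model":               "random",
        "llm-d.ai/model-pool":          "simulated-accelerators",
        "test-resource":                "true",
    }
    fmt.Println(labelsCoverSelector(podLabels, poolSelector)) // prints: true
}
```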

test/e2e/scale_from_zero_test.go

Lines changed: 3 additions & 1 deletion
@@ -28,7 +28,9 @@ import (
 // Uses KEDA ScaledObject when standard HPA rejects minReplicas=0 (e.g. OpenShift).
 var _ = Describe("Scale-From-Zero Feature", Label("smoke", "full"), Ordered, func() {
 var (
-poolName = "scale-from-zero-pool"
+// poolName must match the infrastructure InferencePool's selector label (llm-d.ai/guide).
+// E2E tests only run on kind-emulator which uses "simulated-accelerators" (WELL_LIT_PATH_NAME).
+poolName = "simulated-accelerators"
 modelServiceName = "scale-from-zero-ms"
 vaName = "scale-from-zero-va"
 hpaName = "scale-from-zero-hpa"
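Hard-coding `"simulated-accelerators"` in the test works because kind-emulator is the only supported environment, but the value now has to stay in sync with the fixture labels and the gaie-sim InferencePool selector. One possible way to express that coupling, shown only as an illustrative sketch (the constant name and package are hypothetical, not from this commit):

```go
// Hypothetical shared constant (not from this commit); the name mirrors the
// WELL_LIT_PATH_NAME mentioned in the new comment above.
package fixtures

// WellLitPathName is the llm-d guide used by the kind-emulator deployment.
// The test's poolName and the llm-d.ai/guide label would both take this value.
const WellLitPathName = "simulated-accelerators"
```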

test/utils/resources/llmdsim.go

Lines changed: 10 additions & 10 deletions
@@ -23,17 +23,17 @@ func CreateLlmdSimDeployment(namespace, deployName, modelName, appLabel, port st
 Replicas: ptr.To(replicas),
 Selector: &metav1.LabelSelector{
 MatchLabels: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
-"llm-d.ai/model": "ms-sim-llm-d-modelservice",
+"llm-d.ai/model":             "ms-sim-llm-d-modelservice",
 },
 },
 Template: corev1.PodTemplateSpec{
 ObjectMeta: metav1.ObjectMeta{
 Labels: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
-"llm-d.ai/model": "ms-sim-llm-d-modelservice",
+"llm-d.ai/model":             "ms-sim-llm-d-modelservice",
 },
 },
 Spec: corev1.PodSpec{

@@ -161,17 +161,17 @@ func CreateLlmdSimDeploymentWithGPU(namespace, deployName, modelName, appLabel,
 Replicas: ptr.To(replicas),
 Selector: &metav1.LabelSelector{
 MatchLabels: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
-"llm-d.ai/model": "ms-sim-llm-d-modelservice",
+"llm-d.ai/model":             "ms-sim-llm-d-modelservice",
 },
 },
 Template: corev1.PodTemplateSpec{
 ObjectMeta: metav1.ObjectMeta{
 Labels: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
-"llm-d.ai/model": "ms-sim-llm-d-modelservice",
+"llm-d.ai/model":             "ms-sim-llm-d-modelservice",
 },
 },
 Spec: corev1.PodSpec{

@@ -251,13 +251,13 @@ func CreateLlmdSimService(namespace, serviceName, appLabel string, nodePort, por
 Name: serviceName,
 Namespace: namespace,
 Labels: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
 },
 },
 Spec: corev1.ServiceSpec{
 Selector: map[string]string{
-"app": appLabel,
+"app":                        appLabel,
 "llm-d.ai/inference-serving": "true",
 },
 Ports: []corev1.ServicePort{
