diff --git a/maas-api/internal/handlers/models_test.go b/maas-api/internal/handlers/models_test.go
index bb95e5c30..66408af0e 100644
--- a/maas-api/internal/handlers/models_test.go
+++ b/maas-api/internal/handlers/models_test.go
@@ -17,6 +17,8 @@ import (
 )
 
 func TestListingModels(t *testing.T) {
+	strptr := func(s string) *string { return &s }
+
 	llmTestScenarios := []fixtures.LLMTestScenario{
 		{
 			Name:      "llama-7b",
@@ -48,6 +50,13 @@ func TestListingModels(t *testing.T) {
 			URL:       fixtures.PublicURL(""),
 			Ready:     false,
 		},
+		{
+			Name:          "fallback-model-name",
+			Namespace:     fixtures.TestNamespace,
+			URL:           fixtures.PublicURL("http://fallback-model-name." + fixtures.TestNamespace + ".acme.com/v1"),
+			Ready:         true,
+			SpecModelName: strptr("fallback-model-name"),
+		},
 	}
 
 	llmInferenceServices := fixtures.CreateLLMInferenceServices(llmTestScenarios...)
@@ -88,11 +97,17 @@ func TestListingModels(t *testing.T) {
 
 	var testCases []expectedModel
 	for _, llmTestScenario := range llmTestScenarios {
+		// expected ID mirrors toModels(): fallback to metadata.name unless spec.model.name is non-empty
+		expectedModelID := llmTestScenario.Name
+		if llmTestScenario.SpecModelName != nil && *llmTestScenario.SpecModelName != "" {
+			expectedModelID = *llmTestScenario.SpecModelName
+		}
+
 		testCases = append(testCases, expectedModel{
-			name: llmTestScenario.Name,
+			name: expectedModelID,
 			expectedModel: models.Model{
 				Model: openai.Model{
-					ID:      llmTestScenario.Name,
+					ID:      expectedModelID,
 					Object:  "model",
 					OwnedBy: llmTestScenario.Namespace,
 				},
diff --git a/maas-api/internal/models/kserve.go b/maas-api/internal/models/kserve.go
index 5a237b5c7..51286cf12 100644
--- a/maas-api/internal/models/kserve.go
+++ b/maas-api/internal/models/kserve.go
@@ -78,12 +78,24 @@ func toModels(list *unstructured.UnstructuredList) ([]Model, error) {
 	for _, item := range list.Items {
 		url, errURL := findURL(item)
 		if errURL != nil {
-			log.Printf("DEBUG: Failed to find URL for %s: %v", item.GetKind(), errURL)
+			log.Printf("DEBUG: Failed to find URL for %s %s/%s: %v",
+				item.GetKind(), item.GetNamespace(), item.GetName(), errURL)
+		}
+
+		// Default to metadata.name
+		modelID := item.GetName()
+
+		// Check if .spec.model.name exists
+		if name, found, err := unstructured.NestedString(item.Object, "spec", "model", "name"); err != nil {
+			log.Printf("DEBUG: Error reading spec.model.name for %s %s/%s: %v",
+				item.GetKind(), item.GetNamespace(), item.GetName(), err)
+		} else if found && name != "" {
+			modelID = name
 		}
 
 		models = append(models, Model{
 			Model: openai.Model{
-				ID:      item.GetName(),
+				ID:      modelID,
 				Object:  "model",
 				OwnedBy: item.GetNamespace(),
 				Created: item.GetCreationTimestamp().Unix(),
diff --git a/maas-api/test/fixtures/llm_models.go b/maas-api/test/fixtures/llm_models.go
index 98dbd06da..4dbc39892 100644
--- a/maas-api/test/fixtures/llm_models.go
+++ b/maas-api/test/fixtures/llm_models.go
@@ -42,8 +42,17 @@ func (i AddressEntry) AddTo(obj *unstructured.Unstructured) {
 	}, "status", "addresses")
 }
 
+type LLMInferenceServiceOption func(*unstructured.Unstructured)
+
+// WithSpecModelName sets .spec.model.name (can be an empty string "" to test fallback logic).
+func WithSpecModelName(name string) LLMInferenceServiceOption {
+	return func(obj *unstructured.Unstructured) {
+		_ = unstructured.SetNestedField(obj.Object, name, "spec", "model", "name")
+	}
+}
+
 // CreateLLMInferenceService creates a test LLMInferenceService unstructured object
-func CreateLLMInferenceService(name, namespace string, url ModelURL, ready bool) *unstructured.Unstructured {
+func CreateLLMInferenceService(name, namespace string, url ModelURL, ready bool, opts ...LLMInferenceServiceOption) *unstructured.Unstructured {
 	obj := &unstructured.Unstructured{}
 	obj.Object = map[string]any{}
 	obj.SetAPIVersion("serving.kserve.io/v1alpha1")
@@ -114,22 +123,40 @@ func CreateLLMInferenceService(name, namespace string, url ModelURL, ready bool)
 	_ = unstructured.SetNestedSlice(obj.Object, conditions, "status", "conditions")
 
+	// Apply options (e.g., WithSpecModelName)
+	for _, opt := range opts {
+		opt(obj)
+	}
+
 	return obj
 }
 
 // LLMTestScenario defines a test scenario for LLM models
 type LLMTestScenario struct {
-	Name      string
-	Namespace string
-	URL       ModelURL
-	Ready     bool
+	Name          string
+	Namespace     string
+	URL           ModelURL
+	Ready         bool
+	SpecModelName *string
 }
 
 // CreateLLMInferenceServices creates a set of test LLM objects for testing
 func CreateLLMInferenceServices(scenarios ...LLMTestScenario) []runtime.Object {
 	var objects []runtime.Object
 	for _, scenario := range scenarios {
-		obj := CreateLLMInferenceService(scenario.Name, scenario.Namespace, scenario.URL, scenario.Ready)
+		var opts []LLMInferenceServiceOption
+		if scenario.SpecModelName != nil {
+			opts = append(opts, WithSpecModelName(*scenario.SpecModelName))
+		}
+
+		obj := CreateLLMInferenceService(
+			scenario.Name,
+			scenario.Namespace,
+			scenario.URL,
+			scenario.Ready,
+			opts..., // apply any options (e.g., WithSpecModelName)
+		)
+
 		objects = append(objects, obj)