diff --git a/api/v1alpha1/ai_gateway_route_helper.go b/api/v1alpha1/ai_gateway_route_helper.go index e35ac12c8b..a09891ab57 100644 --- a/api/v1alpha1/ai_gateway_route_helper.go +++ b/api/v1alpha1/ai_gateway_route_helper.go @@ -20,7 +20,7 @@ const ( inferencePoolKind = "InferencePool" ) -// GetTimeoutsWithDefaults returns the timeouts with default values applied when not specified. +// GetTimeoutsOrDefault returns the timeouts with default values applied when not specified. // This ensures that AI Gateway routes have appropriate timeout defaults for AI workloads. func (r *AIGatewayRouteRule) GetTimeoutsOrDefault() *gwapiv1.HTTPRouteTimeouts { defaultTimeout := defaultRequestTimeout diff --git a/cmd/aigw/docker-compose-otel.yaml b/cmd/aigw/docker-compose-otel.yaml index 20f1f782af..5e37bd88b0 100644 --- a/cmd/aigw/docker-compose-otel.yaml +++ b/cmd/aigw/docker-compose-otel.yaml @@ -10,7 +10,7 @@ volumes: services: # aigw-build builds the Envoy AI Gateway CLI binary, so you can use main code. aigw-build: - image: golang:1.24.6 + image: golang:1.25 container_name: aigw-build working_dir: /workspace volumes: diff --git a/cmd/aigw/docker-compose.yaml b/cmd/aigw/docker-compose.yaml index 1e13bcfa18..2e1f988799 100644 --- a/cmd/aigw/docker-compose.yaml +++ b/cmd/aigw/docker-compose.yaml @@ -11,7 +11,7 @@ volumes: services: # aigw-build builds the Envoy AI Gateway CLI binary, so you can use main code. aigw-build: - image: golang:1.24.6 + image: golang:1.25 container_name: aigw-build working_dir: /workspace volumes: @@ -56,7 +56,7 @@ services: # chat-completion is a simple curl-based test client for sending requests to aigw. chat-completion: - image: golang:1.24.6 + image: golang:1.25 container_name: chat-completion profiles: ["test"] env_file: diff --git a/cmd/aigw/run.go b/cmd/aigw/run.go index 7f6ad6f753..c83d70d64c 100644 --- a/cmd/aigw/run.go +++ b/cmd/aigw/run.go @@ -320,7 +320,7 @@ func (runCtx *runCmdContext) mustClearSetOwnerReferencesAndStatusAndWriteObj(typ if err != nil { panic(err) } - var raw map[string]interface{} + var raw map[string]any err = yaml.Unmarshal(marshaled, &raw) if err != nil { panic(err) diff --git a/cmd/extproc/mainlib/main.go b/cmd/extproc/mainlib/main.go index a6f4c1a359..974af5a7c4 100644 --- a/cmd/extproc/mainlib/main.go +++ b/cmd/extproc/mainlib/main.go @@ -218,8 +218,8 @@ func listen(ctx context.Context, name, network, address string) (net.Listener, e // listenAddress returns the network and address for the given address flag. func listenAddress(addrFlag string) (string, string) { - if strings.HasPrefix(addrFlag, "unix://") { - p := strings.TrimPrefix(addrFlag, "unix://") + if after, ok := strings.CutPrefix(addrFlag, "unix://"); ok { + p := after _ = os.Remove(p) // Remove the socket file if it exists. return "unix", p } diff --git a/go.mod b/go.mod index 73323eb5d8..0482bc1b4e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ai-gateway -go 1.24.6 +go 1.25 replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 diff --git a/internal/apischema/anthropic/anthropic.go b/internal/apischema/anthropic/anthropic.go index 954ff50541..f14fd334d3 100644 --- a/internal/apischema/anthropic/anthropic.go +++ b/internal/apischema/anthropic/anthropic.go @@ -8,9 +8,10 @@ package anthropic // MessagesRequest represents a request to the Anthropic Messages API. // Uses a dictionary approach to handle any JSON structure flexibly. 
-type MessagesRequest map[string]interface{} +type MessagesRequest map[string]any // Helper methods to extract common fields from the dictionary + func (m MessagesRequest) GetModel() string { if model, ok := m["model"].(string); ok { return model diff --git a/internal/apischema/awsbedrock/awsbedrock.go b/internal/apischema/awsbedrock/awsbedrock.go index 364032542e..97d0ae7038 100644 --- a/internal/apischema/awsbedrock/awsbedrock.go +++ b/internal/apischema/awsbedrock/awsbedrock.go @@ -287,7 +287,7 @@ type ToolUseBlock struct { // Name is the name the tool that the model wants to use. Name string `json:"name"` // Input is to pass to the tool in JSON format. - Input map[string]interface{} `json:"input"` + Input map[string]any `json:"input"` // ToolUseID is the ID for the tool request, pattern is ^[a-zA-Z0-9_-]+$. ToolUseID string `json:"toolUseId"` } diff --git a/internal/apischema/openai/openai.go b/internal/apischema/openai/openai.go index ec07c04d78..2338a4d1b0 100644 --- a/internal/apischema/openai/openai.go +++ b/internal/apischema/openai/openai.go @@ -139,7 +139,7 @@ type ChatCompletionContentPartUserUnionParam struct { } func (c *ChatCompletionContentPartUserUnionParam) UnmarshalJSON(data []byte) error { - var chatContentPart map[string]interface{} + var chatContentPart map[string]any if err := json.Unmarshal(data, &chatContentPart); err != nil { return err } @@ -187,7 +187,7 @@ func (c ChatCompletionContentPartUserUnionParam) MarshalJSON() ([]byte, error) { } type StringOrAssistantRoleContentUnion struct { - Value interface{} + Value any } func (s *StringOrAssistantRoleContentUnion) UnmarshalJSON(data []byte) error { @@ -213,7 +213,7 @@ func (s StringOrAssistantRoleContentUnion) MarshalJSON() ([]byte, error) { } type StringOrArray struct { - Value interface{} + Value any } func (s *StringOrArray) UnmarshalJSON(data []byte) error { @@ -256,7 +256,7 @@ func (s StringOrArray) MarshalJSON() ([]byte, error) { } type StringOrUserRoleContentUnion struct { - Value interface{} + Value any } func (s *StringOrUserRoleContentUnion) UnmarshalJSON(data []byte) error { @@ -282,12 +282,12 @@ func (s StringOrUserRoleContentUnion) MarshalJSON() ([]byte, error) { } type ChatCompletionMessageParamUnion struct { - Value interface{} + Value any Type string } func (c *ChatCompletionMessageParamUnion) UnmarshalJSON(data []byte) error { - var chatMessage map[string]interface{} + var chatMessage map[string]any if err := json.Unmarshal(data, &chatMessage); err != nil { return err } @@ -502,7 +502,6 @@ type Reasoning struct { Summary *string `json:"summary,omitempty"` } -// ChatCompletionRequest represents a request structure for chat completion API. // ChatCompletionModality represents the output types that the model can generate. type ChatCompletionModality string @@ -704,7 +703,7 @@ type ChatCompletionRequest struct { // Stop string / array / null Defaults to null // Up to 4 sequences where the API will stop generating further tokens. // Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop - Stop interface{} `json:"stop,omitempty"` + Stop any `json:"stop,omitempty"` // Stream: If set, partial message deltas will be sent, like in ChatGPT. // Docs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream @@ -810,7 +809,7 @@ const ( // ChatCompletionToolChoice represents the tool choice for chat completions. // It can be either a string (none, auto, required) or a ChatCompletionNamedToolChoice object. 
-type ChatCompletionToolChoice interface{} +type ChatCompletionToolChoice any // ChatCompletionNamedToolChoice specifies a tool the model should use. Use to force the model to call a specific function. type ChatCompletionNamedToolChoice struct { @@ -1236,7 +1235,7 @@ type Embedding struct { // EmbeddingUnion is a union type that can handle both []float64 and string formats. type EmbeddingUnion struct { - Value interface{} + Value any } // UnmarshalJSON implements json.Unmarshaler to handle both []float64 and string formats. diff --git a/internal/apischema/openai/openai_test.go b/internal/apischema/openai/openai_test.go index 8dbf35d29a..e21ea7b3c0 100644 --- a/internal/apischema/openai/openai_test.go +++ b/internal/apischema/openai/openai_test.go @@ -275,13 +275,13 @@ func TestOpenAIChatCompletionMessageUnmarshal(t *testing.T) { JSONSchema: &ChatCompletionResponseFormatJSONSchema{ Name: "math_response", Strict: true, - Schema: map[string]interface{}{ + Schema: map[string]any{ "additionalProperties": false, "type": "object", - "properties": map[string]interface{}{ + "properties": map[string]any{ "step": "test_step", }, - "required": []interface{}{"steps"}, + "required": []any{"steps"}, }, }, }, @@ -310,7 +310,7 @@ func TestOpenAIChatCompletionMessageUnmarshal(t *testing.T) { }, MaxCompletionTokens: ptr.To[int64](1024), ParallelToolCalls: ptr.To(true), - Stop: []interface{}{"\n", "stop"}, + Stop: []any{"\n", "stop"}, ServiceTier: ptr.To("flex"), }, }, @@ -1232,7 +1232,7 @@ func TestEmbeddingUnionUnmarshal(t *testing.T) { tests := []struct { name string input string - want interface{} + want any wantErr bool }{ { diff --git a/internal/controller/ai_service_backend_test.go b/internal/controller/ai_service_backend_test.go index 037399cea8..0e4366505d 100644 --- a/internal/controller/ai_service_backend_test.go +++ b/internal/controller/ai_service_backend_test.go @@ -98,7 +98,7 @@ func TestAIServiceBackendController_Reconcile_error_with_multiple_bsps(t *testin const backendName, namespace = "mybackend", "default" // Create Multiple Backend Security Policies that target the same backend. 
- for i := 0; i < 5; i++ { + for i := range 5 { bsp := &aigv1a1.BackendSecurityPolicy{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("bsp-%d", i), Namespace: namespace}, Spec: aigv1a1.BackendSecurityPolicySpec{ diff --git a/internal/controller/backend_security_policy_test.go b/internal/controller/backend_security_policy_test.go index 3bfac5bca6..0cd1db4594 100644 --- a/internal/controller/backend_security_policy_test.go +++ b/internal/controller/backend_security_policy_test.go @@ -255,8 +255,8 @@ func TestBackendSecurityPolicyController_RotateCredential(t *testing.T) { ctx := oidcv3.InsecureIssuerURLContext(t.Context(), discoveryServer.URL) data := map[string][]byte{ - "credentials": []byte(fmt.Sprintf("[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\naws_session_token = %s\nregion = %s\n", - "default", "accessKey", "secretKey", "sessionToken", "us-east-2")), + "credentials": fmt.Appendf(nil, "[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\naws_session_token = %s\nregion = %s\n", + "default", "accessKey", "secretKey", "sessionToken", "us-east-2"), } secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -707,8 +707,8 @@ func TestBackendSecurityPolicyController_ExecutionRotation(t *testing.T) { require.NoError(t, cl.Create(t.Context(), bsp)) ctx := oidcv3.InsecureIssuerURLContext(t.Context(), discoveryServer.URL) data := map[string][]byte{ - "credentials": []byte(fmt.Sprintf("[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\naws_session_token = %s\nregion = %s\n", - "default", "accessKey", "secretKey", "sessionToken", "us-east-2")), + "credentials": fmt.Appendf(nil, "[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\naws_session_token = %s\nregion = %s\n", + "default", "accessKey", "secretKey", "sessionToken", "us-east-2"), } now := time.Now() expirationTime := now.Add(-1 * time.Hour) diff --git a/internal/controller/gateway.go b/internal/controller/gateway.go index 1cfb2c17d4..26d70e288e 100644 --- a/internal/controller/gateway.go +++ b/internal/controller/gateway.go @@ -441,9 +441,9 @@ func (c *GatewayController) annotateGatewayPods(ctx context.Context, c.logger.Info("annotating pod", "namespace", pod.Namespace, "name", pod.Name) _, err := c.kube.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, types.MergePatchType, - []byte(fmt.Sprintf( + fmt.Appendf(nil, `{"metadata":{"annotations":{"%s":"%s"}}}`, aigatewayUUIDAnnotationKey, uuid), - ), metav1.PatchOptions{}) + metav1.PatchOptions{}) if err != nil { return fmt.Errorf("failed to patch pod %s: %w", pod.Name, err) } @@ -453,9 +453,9 @@ func (c *GatewayController) annotateGatewayPods(ctx context.Context, for _, dep := range deployments { c.logger.Info("rolling out deployment", "namespace", dep.Namespace, "name", dep.Name) _, err := c.kube.AppsV1().Deployments(dep.Namespace).Patch(ctx, dep.Name, types.MergePatchType, - []byte(fmt.Sprintf( + fmt.Appendf(nil, `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, aigatewayUUIDAnnotationKey, uuid), - ), metav1.PatchOptions{}) + metav1.PatchOptions{}) if err != nil { return fmt.Errorf("failed to patch deployment %s: %w", dep.Name, err) } @@ -464,9 +464,9 @@ func (c *GatewayController) annotateGatewayPods(ctx context.Context, for _, daemonSet := range daemonSets { c.logger.Info("rolling out daemonSet", "namespace", daemonSet.Namespace, "name", daemonSet.Name) _, err := c.kube.AppsV1().DaemonSets(daemonSet.Namespace).Patch(ctx, daemonSet.Name, types.MergePatchType, - []byte(fmt.Sprintf( + fmt.Appendf(nil, 
`{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, aigatewayUUIDAnnotationKey, uuid), - ), metav1.PatchOptions{}) + metav1.PatchOptions{}) if err != nil { return fmt.Errorf("failed to patch daemonset %s: %w", daemonSet.Name, err) } diff --git a/internal/controller/rotators/aws_oidc_rotator_test.go b/internal/controller/rotators/aws_oidc_rotator_test.go index fd7ca3fb83..06ae583488 100644 --- a/internal/controller/rotators/aws_oidc_rotator_test.go +++ b/internal/controller/rotators/aws_oidc_rotator_test.go @@ -55,8 +55,8 @@ func createTestAwsSecret(t *testing.T, client client.Client, bspName string, acc profile = awsProfileName } data := map[string][]byte{ - AwsCredentialsKey: []byte(fmt.Sprintf("[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\naws_session_token = %s\nregion = %s\n", - profile, accessKey, secretKey, sessionToken, awsRegion)), + AwsCredentialsKey: fmt.Appendf(nil, "[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\naws_session_token = %s\nregion = %s\n", + profile, accessKey, secretKey, sessionToken, awsRegion), } err := client.Create(t.Context(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/internal/controller/secret_test.go b/internal/controller/secret_test.go index 1db31a714a..892220e243 100644 --- a/internal/controller/secret_test.go +++ b/internal/controller/secret_test.go @@ -6,7 +6,8 @@ package controller import ( - "sort" + "cmp" + "slices" "testing" "github.com/stretchr/testify/require" @@ -62,11 +63,11 @@ func TestSecretController_Reconcile(t *testing.T) { }}) require.NoError(t, err) actual := eventCh.RequireItemsEventually(t, len(originals)) - sort.Slice(actual, func(i, j int) bool { - return actual[i].Name < actual[j].Name + slices.SortFunc(actual, func(a, b *aigv1a1.BackendSecurityPolicy) int { + return cmp.Compare(a.Name, b.Name) }) - sort.Slice(originals, func(i, j int) bool { - return originals[i].Name < originals[j].Name + slices.SortFunc(originals, func(a, b *aigv1a1.BackendSecurityPolicy) int { + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, actual) diff --git a/internal/extensionserver/extensionserver_test.go b/internal/extensionserver/extensionserver_test.go index 2d085e6a31..a2891c948a 100644 --- a/internal/extensionserver/extensionserver_test.go +++ b/internal/extensionserver/extensionserver_test.go @@ -184,19 +184,19 @@ func Test_maybeModifyCluster(t *testing.T) { // Helper function to create an InferencePool ExtensionResource. 
func createInferencePoolExtensionResource(name, namespace string) *egextension.ExtensionResource { unstructuredObj := &unstructured.Unstructured{ - Object: map[string]interface{}{ + Object: map[string]any{ "apiVersion": "inference.networking.x-k8s.io/v1alpha2", "kind": "InferencePool", - "metadata": map[string]interface{}{ + "metadata": map[string]any{ "name": name, "namespace": namespace, }, - "spec": map[string]interface{}{ + "spec": map[string]any{ "targetPortNumber": int32(8080), - "selector": map[string]interface{}{ + "selector": map[string]any{ "app": "test-inference", }, - "extensionRef": map[string]interface{}{ + "extensionRef": map[string]any{ "name": "test-epp", }, }, @@ -1187,10 +1187,10 @@ func TestConstructInferencePoolsFrom(t *testing.T) { t.Run("wrong API version", func(t *testing.T) { unstructuredObj := &unstructured.Unstructured{ - Object: map[string]interface{}{ + Object: map[string]any{ "apiVersion": "v1", "kind": "Service", - "metadata": map[string]interface{}{ + "metadata": map[string]any{ "name": "test-service", "namespace": "default", }, diff --git a/internal/extensionserver/post_translate_modify_test.go b/internal/extensionserver/post_translate_modify_test.go index 03ef30573d..d99633a46d 100644 --- a/internal/extensionserver/post_translate_modify_test.go +++ b/internal/extensionserver/post_translate_modify_test.go @@ -189,27 +189,27 @@ func TestInsertAIGatewayExtProcFilter(t *testing.T) { } func TestServer_isRouteGeneratedByAIGateway(t *testing.T) { - emptyStruct, err := structpb.NewStruct(map[string]interface{}{}) + emptyStruct, err := structpb.NewStruct(map[string]any{}) require.NoError(t, err) - structWithEmptyResources, err := structpb.NewStruct(map[string]interface{}{ + structWithEmptyResources, err := structpb.NewStruct(map[string]any{ "resources": nil, }) require.NoError(t, err) - withAnnotationsListStruct, err := structpb.NewStruct(map[string]interface{}{ - "resources": []interface{}{ - map[string]interface{}{ - "annotations": map[string]interface{}{}, + withAnnotationsListStruct, err := structpb.NewStruct(map[string]any{ + "resources": []any{ + map[string]any{ + "annotations": map[string]any{}, }, }, }) require.NoError(t, err) - withOKAnnotationsListStruct, err := structpb.NewStruct(map[string]interface{}{ - "resources": []interface{}{ - map[string]interface{}{ - "annotations": map[string]interface{}{ + withOKAnnotationsListStruct, err := structpb.NewStruct(map[string]any{ + "resources": []any{ + map[string]any{ + "annotations": map[string]any{ internalapi.AIGatewayGeneratedHTTPRouteAnnotation: "true", }, }, diff --git a/internal/extproc/backendauth/gcp.go b/internal/extproc/backendauth/gcp.go index 3f247b6c77..0e3490ef57 100644 --- a/internal/extproc/backendauth/gcp.go +++ b/internal/extproc/backendauth/gcp.go @@ -80,7 +80,7 @@ func (g *gcpHandler) Do(_ context.Context, _ map[string]string, headerMut *extpr &corev3.HeaderValueOption{ Header: &corev3.HeaderValue{ Key: "Authorization", - RawValue: []byte(fmt.Sprintf("Bearer %s", g.gcpAccessToken)), + RawValue: fmt.Appendf(nil, "Bearer %s", g.gcpAccessToken), }, }, ) diff --git a/internal/extproc/messages_processor_test.go b/internal/extproc/messages_processor_test.go index 33f8b6866c..fd37ce2599 100644 --- a/internal/extproc/messages_processor_test.go +++ b/internal/extproc/messages_processor_test.go @@ -373,7 +373,7 @@ func TestMessagesProcessorUpstreamFilter_ProcessRequestHeaders_WithMocks(t *test requestBody := &anthropicschema.MessagesRequest{ "model": "claude-3-sonnet", "max_tokens": 1000, - "messages": 
[]interface{}{map[string]interface{}{"role": "user", "content": "Hello"}}, + "messages": []any{map[string]any{"role": "user", "content": "Hello"}}, } requestBodyRaw := []byte(`{"model": "claude-3-sonnet", "max_tokens": 1000, "messages": [{"role": "user", "content": "Hello"}]}`) diff --git a/internal/extproc/server.go b/internal/extproc/server.go index cf684e8281..c5c3431761 100644 --- a/internal/extproc/server.go +++ b/internal/extproc/server.go @@ -190,7 +190,7 @@ func (s *Server) Process(stream extprocv3.ExternalProcessor_ProcessServer) error Response: &extprocv3.ProcessingResponse_ImmediateResponse{ ImmediateResponse: &extprocv3.ImmediateResponse{ Status: &typev3.HttpStatus{Code: typev3.StatusCode_NotFound}, - Body: []byte(fmt.Sprintf("unsupported path: %s", path)), + Body: fmt.Appendf(nil, "unsupported path: %s", path), GrpcStatus: &extprocv3.GrpcStatus{Status: uint32(codes.NotFound)}, }, }, diff --git a/internal/extproc/translator/anthropic_gcpanthropic.go b/internal/extproc/translator/anthropic_gcpanthropic.go index bd5bbf570d..a2c7ca57c2 100644 --- a/internal/extproc/translator/anthropic_gcpanthropic.go +++ b/internal/extproc/translator/anthropic_gcpanthropic.go @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "io" + "maps" "github.com/anthropics/anthropic-sdk-go" extprocv3 "github.com/envoyproxy/go-control-plane/envoy/service/ext_proc/v3" @@ -39,10 +40,8 @@ func (a *anthropicToGCPAnthropicTranslator) RequestBody(_ []byte, body *anthropi modelName := body.GetModel() // Work directly with the map since MessagesRequest is already map[string]interface{}. - anthropicReq := make(map[string]interface{}) - for k, v := range *body { - anthropicReq[k] = v - } + anthropicReq := make(map[string]any) + maps.Copy(anthropicReq, *body) // Apply model name override if configured. if a.modelNameOverride != "" { @@ -99,10 +98,10 @@ func (a *anthropicToGCPAnthropicTranslator) ResponseBody(_ map[string]string, bo // For streaming chunks, try to extract token usage from message_delta events. if !endOfStream { // Try to parse as a message_delta event to extract usage. - var eventData map[string]interface{} + var eventData map[string]any if unmarshalErr := json.Unmarshal(bodyBytes, &eventData); unmarshalErr == nil { if eventType, ok := eventData["type"].(string); ok && eventType == "message_delta" { - if usageData, ok := eventData["usage"].(map[string]interface{}); ok { + if usageData, ok := eventData["usage"].(map[string]any); ok { // Extract token usage from the message_delta event. if outputTokens, ok := usageData["output_tokens"].(float64); ok { tokenUsage = LLMTokenUsage{ diff --git a/internal/extproc/translator/anthropic_gcpanthropic_test.go b/internal/extproc/translator/anthropic_gcpanthropic_test.go index 7f10c46771..19937aed3c 100644 --- a/internal/extproc/translator/anthropic_gcpanthropic_test.go +++ b/internal/extproc/translator/anthropic_gcpanthropic_test.go @@ -78,7 +78,7 @@ func TestAnthropicToGCPAnthropicTranslator_RequestBody_ModelNameOverride(t *test assert.Equal(t, expectedPath, string(pathHeader.Header.RawValue)) // Check that model field is removed from body (since it's in the path). 
- var modifiedReq map[string]interface{} + var modifiedReq map[string]any err = json.Unmarshal(bodyMutation.GetBody(), &modifiedReq) require.NoError(t, err) _, hasModel := modifiedReq["model"] @@ -125,8 +125,8 @@ func TestAnthropicToGCPAnthropicTranslator_ComprehensiveMarshalling(t *testing.T Description: anthropic.String("Get current weather information"), InputSchema: anthropic.ToolInputSchemaParam{ Type: "object", - Properties: map[string]interface{}{ - "location": map[string]interface{}{ + Properties: map[string]any{ + "location": map[string]any{ "type": "string", "description": "City name", }, @@ -145,7 +145,7 @@ func TestAnthropicToGCPAnthropicTranslator_ComprehensiveMarshalling(t *testing.T require.NotNil(t, headerMutation) require.NotNil(t, bodyMutation) - var outputReq map[string]interface{} + var outputReq map[string]any err = json.Unmarshal(bodyMutation.GetBody(), &outputReq) require.NoError(t, err) @@ -154,7 +154,7 @@ func TestAnthropicToGCPAnthropicTranslator_ComprehensiveMarshalling(t *testing.T require.Contains(t, outputReq, "anthropic_version", "should add anthropic_version for GCP") require.Equal(t, "2023-06-01", outputReq["anthropic_version"]) - messages, ok := outputReq["messages"].([]interface{}) + messages, ok := outputReq["messages"].([]any) require.True(t, ok, "messages should be an array") require.Len(t, messages, 3, "should have 3 messages") @@ -165,17 +165,17 @@ func TestAnthropicToGCPAnthropicTranslator_ComprehensiveMarshalling(t *testing.T require.Equal(t, 0.95, outputReq["top_p"]) require.Equal(t, "You are a helpful weather assistant.", outputReq["system"]) - stopSeq, ok := outputReq["stop_sequences"].([]interface{}) + stopSeq, ok := outputReq["stop_sequences"].([]any) require.True(t, ok, "stop_sequences should be an array") require.Len(t, stopSeq, 2) require.Equal(t, "Human:", stopSeq[0]) require.Equal(t, "Assistant:", stopSeq[1]) - tools, ok := outputReq["tools"].([]interface{}) + tools, ok := outputReq["tools"].([]any) require.True(t, ok, "tools should be an array") require.Len(t, tools, 1) - toolChoice, ok := outputReq["tool_choice"].(map[string]interface{}) + toolChoice, ok := outputReq["tool_choice"].(map[string]any) require.True(t, ok, "tool_choice should be an object") require.NotEmpty(t, toolChoice) @@ -241,7 +241,7 @@ func TestAnthropicToGCPAnthropicTranslator_BackendVersionHandling(t *testing.T) require.NoError(t, err) require.NotNil(t, bodyMutation) - var outputReq map[string]interface{} + var outputReq map[string]any err = json.Unmarshal(bodyMutation.GetBody(), &outputReq) require.NoError(t, err) @@ -254,7 +254,7 @@ func TestAnthropicToGCPAnthropicTranslator_BackendVersionHandling(t *testing.T) func TestAnthropicToGCPAnthropicTranslator_RequestBody_StreamingPaths(t *testing.T) { tests := []struct { name string - stream interface{} + stream any expectedSpecifier string }{ { @@ -283,9 +283,9 @@ func TestAnthropicToGCPAnthropicTranslator_RequestBody_StreamingPaths(t *testing t.Run(tt.name, func(t *testing.T) { translator := NewAnthropicToGCPAnthropicTranslator("2023-06-01", "") - reqBody := map[string]interface{}{ + reqBody := map[string]any{ "model": "claude-3-sonnet-20240229", - "messages": []map[string]interface{}{{"role": "user", "content": "Test"}}, + "messages": []map[string]any{{"role": "user", "content": "Test"}}, } if tt.stream != nil { @@ -362,21 +362,21 @@ func TestAnthropicToGCPAnthropicTranslator_RequestBody_FieldPassthrough(t *testi Description: anthropic.String("Get weather info"), InputSchema: anthropic.ToolInputSchemaParam{ Type: 
"object", - Properties: map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, + Properties: map[string]any{ + "location": map[string]any{"type": "string"}, }, }, }, }, - "tool_choice": map[string]interface{}{"type": "auto"}, - "metadata": map[string]interface{}{"user_id": "test123"}, + "tool_choice": map[string]any{"type": "auto"}, + "metadata": map[string]any{"user_id": "test123"}, } _, bodyMutation, err := translator.RequestBody(nil, parsedReq, false) require.NoError(t, err) require.NotNil(t, bodyMutation) - var modifiedReq map[string]interface{} + var modifiedReq map[string]any err = json.Unmarshal(bodyMutation.GetBody(), &modifiedReq) require.NoError(t, err) @@ -390,7 +390,7 @@ func TestAnthropicToGCPAnthropicTranslator_RequestBody_FieldPassthrough(t *testi require.Equal(t, float64(40), modifiedReq["top_k"]) // Arrays become []interface{} by JSON unmarshalling. - stopSeq, ok := modifiedReq["stop_sequences"].([]interface{}) + stopSeq, ok := modifiedReq["stop_sequences"].([]any) require.True(t, ok) require.Len(t, stopSeq, 2) require.Equal(t, "Human:", stopSeq[0]) diff --git a/internal/extproc/translator/gemini_helper.go b/internal/extproc/translator/gemini_helper.go index 5191ae674f..d5ecfd5daa 100644 --- a/internal/extproc/translator/gemini_helper.go +++ b/internal/extproc/translator/gemini_helper.go @@ -8,6 +8,7 @@ package translator import ( "encoding/json" "fmt" + "maps" "mime" "net/url" "path" @@ -91,9 +92,7 @@ func openAIMessagesToGeminiContents(messages []openai.ChatCompletionMessageParam if err != nil { return nil, nil, fmt.Errorf("error converting assistant message: %w", err) } - for k, v := range toolCalls { - knownToolCalls[k] = v - } + maps.Copy(knownToolCalls, toolCalls) gcpContents = append(gcpContents, genai.Content{Role: genai.RoleModel, Parts: assistantParts}) default: return nil, nil, fmt.Errorf("invalid role in message: %s", msgUnion.Type) @@ -350,7 +349,7 @@ func openAIToolsToGeminiTools(openaiTools []openai.Tool) ([]genai.Tool, error) { // ] // } // } -func openAIToolChoiceToGeminiToolConfig(toolChoice interface{}) (*genai.ToolConfig, error) { +func openAIToolChoiceToGeminiToolConfig(toolChoice any) (*genai.ToolConfig, error) { if toolChoice == nil { return nil, nil } @@ -412,14 +411,14 @@ func openAIReqToGeminiGenerationConfig(openAIReq *openai.ChatCompletionRequest) case openai.ChatCompletionResponseFormatTypeJSONObject: gc.ResponseMIMEType = mimeTypeApplicationJSON case openai.ChatCompletionResponseFormatTypeJSONSchema: - var schemaMap map[string]interface{} + var schemaMap map[string]any switch sch := openAIReq.ResponseFormat.JSONSchema.Schema.(type) { case string: if err := json.Unmarshal([]byte(sch), &schemaMap); err != nil { return nil, fmt.Errorf("invalid JSON schema string: %w", err) } - case map[string]interface{}: + case map[string]any: schemaMap = sch } @@ -556,7 +555,7 @@ func extractToolCallsFromGeminiParts(parts []*genai.Part) ([]openai.ChatCompleti toolCall := openai.ChatCompletionMessageToolCallParam{ ID: &toolCallID, - Type: "function", + Type: openai.ChatCompletionMessageToolCallTypeFunction, Function: openai.ChatCompletionMessageToolCallFunctionParam{ Name: part.FunctionCall.Name, Arguments: string(args), diff --git a/internal/extproc/translator/gemini_helper_test.go b/internal/extproc/translator/gemini_helper_test.go index 7985164f67..f818522cc4 100644 --- a/internal/extproc/translator/gemini_helper_test.go +++ b/internal/extproc/translator/gemini_helper_test.go @@ -6,6 +6,7 @@ package translator import ( + "fmt" 
"testing" "github.com/google/go-cmp/cmp" @@ -442,7 +443,7 @@ func TestToolMsgToGeminiParts(t *testing.T) { expectedPart: &genai.Part{ FunctionResponse: &genai.FunctionResponse{ Name: "get_weather", - Response: map[string]interface{}{"output": "This is a tool message"}, + Response: map[string]any{"output": "This is a tool message"}, }, }, }, @@ -468,7 +469,7 @@ func TestToolMsgToGeminiParts(t *testing.T) { expectedPart: &genai.Part{ FunctionResponse: &genai.FunctionResponse{ Name: "get_weather", - Response: map[string]interface{}{"output": "This is a tool message. And this is another part"}, + Response: map[string]any{"output": "This is a tool message. And this is another part"}, }, }, }, @@ -785,7 +786,7 @@ func TestOpenAIReqToGeminiGenerationConfig(t *testing.T) { ResponseFormat: &openai.ChatCompletionResponseFormat{ Type: openai.ChatCompletionResponseFormatTypeJSONSchema, JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{ - Schema: map[string]interface{}{ + Schema: map[string]any{ "type": "string", }, }, @@ -975,7 +976,7 @@ func TestOpenAIToolsToGeminiTools(t *testing.T) { func TestOpenAIToolChoiceToGeminiToolConfig(t *testing.T) { tests := []struct { name string - input interface{} + input any expected *genai.ToolConfig expectErr string }{ @@ -1034,3 +1035,306 @@ func TestOpenAIToolChoiceToGeminiToolConfig(t *testing.T) { }) } } + +func TestGeminiLogprobsToOpenAILogprobs(t *testing.T) { + tests := []struct { + name string + input genai.LogprobsResult + expected openai.ChatCompletionChoicesLogprobs + }{ + { + name: "empty logprobs result", + input: genai.LogprobsResult{}, + expected: openai.ChatCompletionChoicesLogprobs{}, + }, + { + name: "single chosen candidate without top candidates", + input: genai.LogprobsResult{ + ChosenCandidates: []*genai.LogprobsResultCandidate{ + { + Token: "hello", + LogProbability: -0.5, + }, + }, + }, + expected: openai.ChatCompletionChoicesLogprobs{ + Content: []openai.ChatCompletionTokenLogprob{ + { + Token: "hello", + Logprob: -0.5, + TopLogprobs: nil, + }, + }, + }, + }, + { + name: "multiple chosen candidates with top candidates", + input: genai.LogprobsResult{ + ChosenCandidates: []*genai.LogprobsResultCandidate{ + { + Token: "hello", + LogProbability: -0.5, + }, + { + Token: "world", + LogProbability: -0.3, + }, + }, + TopCandidates: []*genai.LogprobsResultTopCandidates{ + { + Candidates: []*genai.LogprobsResultCandidate{ + {Token: "hello", LogProbability: -0.5}, + {Token: "hi", LogProbability: -1.2}, + {Token: "hey", LogProbability: -1.5}, + }, + }, + { + Candidates: []*genai.LogprobsResultCandidate{ + {Token: "world", LogProbability: -0.3}, + {Token: "earth", LogProbability: -1.1}, + }, + }, + }, + }, + expected: openai.ChatCompletionChoicesLogprobs{ + Content: []openai.ChatCompletionTokenLogprob{ + { + Token: "hello", + Logprob: -0.5, + TopLogprobs: []openai.ChatCompletionTokenLogprobTopLogprob{ + {Token: "hello", Logprob: -0.5}, + {Token: "hi", Logprob: -1.2}, + {Token: "hey", Logprob: -1.5}, + }, + }, + { + Token: "world", + Logprob: -0.3, + TopLogprobs: []openai.ChatCompletionTokenLogprobTopLogprob{ + {Token: "world", Logprob: -0.3}, + {Token: "earth", Logprob: -1.1}, + }, + }, + }, + }, + }, + { + name: "chosen candidates with nil top candidates entry", + input: genai.LogprobsResult{ + ChosenCandidates: []*genai.LogprobsResultCandidate{ + { + Token: "test", + LogProbability: -0.8, + }, + }, + TopCandidates: []*genai.LogprobsResultTopCandidates{ + nil, + }, + }, + expected: openai.ChatCompletionChoicesLogprobs{ + Content: 
[]openai.ChatCompletionTokenLogprob{ + { + Token: "test", + Logprob: -0.8, + TopLogprobs: nil, + }, + }, + }, + }, + { + name: "chosen candidates with empty top candidates", + input: genai.LogprobsResult{ + ChosenCandidates: []*genai.LogprobsResultCandidate{ + { + Token: "empty", + LogProbability: -0.2, + }, + }, + TopCandidates: []*genai.LogprobsResultTopCandidates{ + { + Candidates: []*genai.LogprobsResultCandidate{}, + }, + }, + }, + expected: openai.ChatCompletionChoicesLogprobs{ + Content: []openai.ChatCompletionTokenLogprob{ + { + Token: "empty", + Logprob: -0.2, + TopLogprobs: nil, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := geminiLogprobsToOpenAILogprobs(tt.input) + // Use cmp.Equal with EquateApprox for float comparison due to float32->float64 conversion. + if !cmp.Equal(tt.expected, result, cmpopts.EquateApprox(0, 0.0001)) { + t.Errorf("geminiLogprobsToOpenAILogprobs() diff:\n%s", cmp.Diff(tt.expected, result, cmpopts.EquateApprox(0, 0.0001))) + } + }) + } +} + +func TestExtractToolCallsFromGeminiParts(t *testing.T) { + tests := []struct { + name string + input []*genai.Part + expected []openai.ChatCompletionMessageToolCallParam + wantErr bool + }{ + { + name: "nil parts", + input: nil, + expected: nil, + }, + { + name: "empty parts", + input: []*genai.Part{}, + expected: nil, + }, + { + name: "parts without function calls", + input: []*genai.Part{ + {Text: "some text"}, + nil, + {Text: "more text"}, + }, + expected: nil, + }, + { + name: "single function call", + input: []*genai.Part{ + { + FunctionCall: &genai.FunctionCall{ + Name: "get_weather", + Args: map[string]any{ + "location": "San Francisco", + "unit": "celsius", + }, + }, + }, + }, + expected: []openai.ChatCompletionMessageToolCallParam{ + { + ID: ptr.To("0"), + Type: openai.ChatCompletionMessageToolCallTypeFunction, + Function: openai.ChatCompletionMessageToolCallFunctionParam{ + Name: "get_weather", + Arguments: `{"location":"San Francisco","unit":"celsius"}`, + }, + }, + }, + }, + { + name: "multiple function calls", + input: []*genai.Part{ + { + FunctionCall: &genai.FunctionCall{ + Name: "function1", + Args: map[string]any{"param1": "value1"}, + }, + }, + {Text: "some text between"}, + { + FunctionCall: &genai.FunctionCall{ + Name: "function2", + Args: map[string]any{"param2": float64(42)}, + }, + }, + }, + expected: []openai.ChatCompletionMessageToolCallParam{ + { + ID: ptr.To("0"), + Type: openai.ChatCompletionMessageToolCallTypeFunction, + Function: openai.ChatCompletionMessageToolCallFunctionParam{ + Name: "function1", + Arguments: `{"param1":"value1"}`, + }, + }, + { + ID: ptr.To("1"), + Type: openai.ChatCompletionMessageToolCallTypeFunction, + Function: openai.ChatCompletionMessageToolCallFunctionParam{ + Name: "function2", + Arguments: `{"param2":42}`, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + calls, err := extractToolCallsFromGeminiParts(tt.input) + + if tt.wantErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + + // Normalize IDs since they're generated. 
+ for i := range calls { + calls[i].ID = ptr.To(fmt.Sprintf("%d", i)) + } + + require.Equal(t, tt.expected, calls) + }) + } +} + +func TestGeminiFinishReasonToOpenAI(t *testing.T) { + tests := []struct { + name string + input genai.FinishReason + expected openai.ChatCompletionChoicesFinishReason + }{ + { + name: "stop reason", + input: genai.FinishReasonStop, + expected: openai.ChatCompletionChoicesFinishReasonStop, + }, + { + name: "max tokens reason", + input: genai.FinishReasonMaxTokens, + expected: openai.ChatCompletionChoicesFinishReasonLength, + }, + { + name: "empty reason for streaming", + input: "", + expected: "", + }, + { + name: "safety reason", + input: genai.FinishReasonSafety, + expected: openai.ChatCompletionChoicesFinishReasonContentFilter, + }, + { + name: "recitation reason", + input: genai.FinishReasonRecitation, + expected: openai.ChatCompletionChoicesFinishReasonContentFilter, + }, + { + name: "other reason", + input: genai.FinishReasonOther, + expected: openai.ChatCompletionChoicesFinishReasonContentFilter, + }, + { + name: "unknown reason", + input: genai.FinishReason("unknown_reason"), + expected: openai.ChatCompletionChoicesFinishReasonContentFilter, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := geminiFinishReasonToOpenAI(tt.input) + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/internal/extproc/translator/openai_awsbedrock.go b/internal/extproc/translator/openai_awsbedrock.go index f5812c9a5e..8e91d4f616 100644 --- a/internal/extproc/translator/openai_awsbedrock.go +++ b/internal/extproc/translator/openai_awsbedrock.go @@ -61,7 +61,7 @@ func (o *openAIToAWSBedrockTranslatorV1ChatCompletion) RequestBody(_ []byte, ope SetHeaders: []*corev3.HeaderValueOption{ {Header: &corev3.HeaderValue{ Key: ":path", - RawValue: []byte(fmt.Sprintf(pathTemplate, modelName)), + RawValue: fmt.Appendf(nil, pathTemplate, modelName), }}, }, } @@ -226,8 +226,8 @@ func (o *openAIToAWSBedrockTranslatorV1ChatCompletion) openAIMessageToBedrockMes } // unmarshalToolCallArguments is a helper method to unmarshal tool call arguments. -func unmarshalToolCallArguments(arguments string) (map[string]interface{}, error) { - var input map[string]interface{} +func unmarshalToolCallArguments(arguments string) (map[string]any, error) { + var input map[string]any if err := json.Unmarshal([]byte(arguments), &input); err != nil { return nil, fmt.Errorf("failed to unmarshal tool call arguments: %w", err) } @@ -638,6 +638,7 @@ func (o *openAIToAWSBedrockTranslatorV1ChatCompletion) extractAmazonEventStreamE // TODO: Maybe reuse the reader and decoder. 
r := bytes.NewReader(o.bufferedBody) dec := eventstream.NewDecoder() + clear(o.events) o.events = o.events[:0] var lastRead int64 for { diff --git a/internal/extproc/translator/openai_awsbedrock_test.go b/internal/extproc/translator/openai_awsbedrock_test.go index bcd86b927b..216216f64e 100644 --- a/internal/extproc/translator/openai_awsbedrock_test.go +++ b/internal/extproc/translator/openai_awsbedrock_test.go @@ -176,7 +176,7 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_RequestBody(t *testing.T) ToolUse: &awsbedrock.ToolUseBlock{ Name: "exec_python_code", ToolUseID: "call_6g7a", - Input: map[string]interface{}{"code_block": "from playwright.sync_api import sync_playwright\n"}, + Input: map[string]any{"code_block": "from playwright.sync_api import sync_playwright\n"}, }, }, }, @@ -400,14 +400,14 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_RequestBody(t *testing.T) Function: &openai.FunctionDefinition{ Name: "get_current_weather", Description: "Get the current weather in a given location", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{ + "properties": map[string]any{ + "location": map[string]any{ "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, - "unit": map[string]interface{}{ + "unit": map[string]any{ "type": "string", "enum": []string{"celsius", "fahrenheit"}, }, @@ -441,14 +441,14 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_RequestBody(t *testing.T) Name: ptr.To("get_current_weather"), Description: ptr.To("Get the current weather in a given location"), InputSchema: &awsbedrock.ToolInputSchema{ - JSON: map[string]interface{}{ + JSON: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{ + "properties": map[string]any{ + "location": map[string]any{ "type": "string", "description": "The city and state, e.g. 
San Francisco, CA", }, - "unit": map[string]interface{}{ + "unit": map[string]any{ "type": "string", "enum": []any{"celsius", "fahrenheit"}, }, @@ -784,14 +784,14 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_RequestBody(t *testing.T) ToolUse: &awsbedrock.ToolUseBlock{ Name: "get_current_weather", ToolUseID: "tool-1", - Input: map[string]interface{}{"city": "Dallas", "state": "TX", "unit": "fahrenheit"}, + Input: map[string]any{"city": "Dallas", "state": "TX", "unit": "fahrenheit"}, }, }, { ToolUse: &awsbedrock.ToolUseBlock{ Name: "get_current_weather", ToolUseID: "tool-2", - Input: map[string]interface{}{"city": "Orlando", "state": "FL", "unit": "fahrenheit"}, + Input: map[string]any{"city": "Orlando", "state": "FL", "unit": "fahrenheit"}, }, }, }, @@ -897,7 +897,7 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_Streaming_ResponseBody(t * require.NoError(t, err) var results []string - for i := 0; i < len(buf); i++ { + for i := range buf { hm, bm, tokenUsage, err := o.ResponseBody(nil, bytes.NewBuffer([]byte{buf[i]}), i == len(buf)-1, nil) require.NoError(t, err) require.Nil(t, hm) @@ -1138,7 +1138,7 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_ResponseBody(t *testing.T) ToolUse: &awsbedrock.ToolUseBlock{ Name: "exec_python_code", ToolUseID: "call_6g7a", - Input: map[string]interface{}{"code_block": "from playwright.sync_api import sync_playwright\n"}, + Input: map[string]any{"code_block": "from playwright.sync_api import sync_playwright\n"}, }, }, }, @@ -1185,7 +1185,7 @@ func TestOpenAIToAWSBedrockTranslatorV1ChatCompletion_ResponseBody(t *testing.T) {ToolUse: &awsbedrock.ToolUseBlock{ Name: "exec_python_code", ToolUseID: "call_6g7a", - Input: map[string]interface{}{"code_block": "from playwright.sync_api import sync_playwright\n"}, + Input: map[string]any{"code_block": "from playwright.sync_api import sync_playwright\n"}, }}, }, }, @@ -1305,6 +1305,7 @@ func TestOpenAIToAWSBedrockTranslatorExtractAmazonEventStreamEvents(t *testing.T require.Len(t, o.events, 1) require.Equal(t, eventBytes[offsets[1]:offsets[1]+5], o.bufferedBody) + clear(o.events) o.events = o.events[:0] o.bufferedBody = eventBytes[0 : offsets[2]+5] o.extractAmazonEventStreamEvents() diff --git a/internal/extproc/translator/openai_azureopenai.go b/internal/extproc/translator/openai_azureopenai.go index d9a298cd1b..e92de3a4e3 100644 --- a/internal/extproc/translator/openai_azureopenai.go +++ b/internal/extproc/translator/openai_azureopenai.go @@ -47,7 +47,7 @@ func (o *openAIToAzureOpenAITranslatorV1ChatCompletion) RequestBody(raw []byte, SetHeaders: []*corev3.HeaderValueOption{ {Header: &corev3.HeaderValue{ Key: ":path", - RawValue: []byte(fmt.Sprintf(pathTemplate, modelName, o.apiVersion)), + RawValue: fmt.Appendf(nil, pathTemplate, modelName, o.apiVersion), }}, }, } diff --git a/internal/extproc/translator/openai_gcpanthropic.go b/internal/extproc/translator/openai_gcpanthropic.go index 68817d9dbc..e47ff62679 100644 --- a/internal/extproc/translator/openai_gcpanthropic.go +++ b/internal/extproc/translator/openai_gcpanthropic.go @@ -150,7 +150,7 @@ func translateOpenAItoAnthropicTools(openAITools []openai.Tool, openAIToolChoice // The parameters for the function are expected to be a JSON Schema object. // We can pass them through as-is. 
if openAITool.Function.Parameters != nil { - paramsMap, ok := openAITool.Function.Parameters.(map[string]interface{}) + paramsMap, ok := openAITool.Function.Parameters.(map[string]any) if !ok { err = fmt.Errorf("failed to cast tool parameters to map[string]interface{}") return @@ -163,13 +163,13 @@ func translateOpenAItoAnthropicTools(openAITools []openai.Tool, openAIToolChoice inputSchema.Type = constant.Object(typeVal) } - var propsVal map[string]interface{} - if propsVal, ok = paramsMap["properties"].(map[string]interface{}); ok { + var propsVal map[string]any + if propsVal, ok = paramsMap["properties"].(map[string]any); ok { inputSchema.Properties = propsVal } - var requiredVal []interface{} - if requiredVal, ok = paramsMap["required"].([]interface{}); ok { + var requiredVal []any + if requiredVal, ok = paramsMap["required"].([]any); ok { requiredSlice := make([]string, len(requiredVal)) for i, v := range requiredVal { if s, ok := v.(string); ok { @@ -256,7 +256,7 @@ func convertContentPartsToAnthropic(parts []openai.ChatCompletionContentPartUser } // Helper: Convert OpenAI message content to Anthropic content. -func openAIToAnthropicContent(content interface{}) ([]anthropic.ContentBlockParamUnion, error) { +func openAIToAnthropicContent(content any) ([]anthropic.ContentBlockParamUnion, error) { switch v := content.(type) { case nil: return nil, nil @@ -357,7 +357,7 @@ func openAIMessageToAnthropicMessageRoleAssistant(openAiMessage *openai.ChatComp // Handle tool_calls (if any). for i := range openAiMessage.ToolCalls { toolCall := &openAiMessage.ToolCalls[i] - var input map[string]interface{} + var input map[string]any if err = json.Unmarshal([]byte(toolCall.Function.Arguments), &input); err != nil { err = fmt.Errorf("failed to unmarshal tool call arguments: %w", err) return @@ -440,7 +440,7 @@ func openAIToAnthropicMessages(openAIMsgs []openai.ChatCompletionMessageParamUni isError := false if contentStr, ok := toolMsg.Content.Value.(string); ok { - var contentMap map[string]interface{} + var contentMap map[string]any if json.Unmarshal([]byte(contentStr), &contentMap) == nil { if _, ok = contentMap["error"]; ok { isError = true diff --git a/internal/extproc/translator/openai_gcpanthropic_stream.go b/internal/extproc/translator/openai_gcpanthropic_stream.go index 4022f155e2..be89050cd5 100644 --- a/internal/extproc/translator/openai_gcpanthropic_stream.go +++ b/internal/extproc/translator/openai_gcpanthropic_stream.go @@ -163,14 +163,14 @@ func (p *anthropicStreamParser) parseAndHandleEvent(eventBlock []byte) (*openai. var eventType []byte var eventData []byte - lines := bytes.Split(eventBlock, []byte("\n")) - for _, line := range lines { - if bytes.HasPrefix(line, sseEventPrefix) { - eventType = bytes.TrimSpace(bytes.TrimPrefix(line, sseEventPrefix)) - } else if bytes.HasPrefix(line, sseDataPrefix) { + lines := bytes.SplitSeq(eventBlock, []byte("\n")) + for line := range lines { + if after, ok := bytes.CutPrefix(line, sseEventPrefix); ok { + eventType = bytes.TrimSpace(after) + } else if after, ok := bytes.CutPrefix(line, sseDataPrefix); ok { // This handles JSON data that might be split across multiple 'data:' lines // by concatenating them (Anthropic's format). - data := bytes.TrimSpace(bytes.TrimPrefix(line, sseDataPrefix)) + data := bytes.TrimSpace(after) eventData = append(eventData, data...) 
} } diff --git a/internal/extproc/translator/openai_gcpanthropic_stream_test.go b/internal/extproc/translator/openai_gcpanthropic_stream_test.go index 8e07642dcc..7a98338194 100644 --- a/internal/extproc/translator/openai_gcpanthropic_stream_test.go +++ b/internal/extproc/translator/openai_gcpanthropic_stream_test.go @@ -353,8 +353,8 @@ data: {"type":"content_block_delta","index":0,"delta":{"type":"input_json_delta" var finalToolCallChunk openai.ChatCompletionResponseChunk // Split the response into individual SSE messages and find the final data chunk. - lines := strings.Split(strings.TrimSpace(bodyStr), "\n\n") - for _, line := range lines { + lines := strings.SplitSeq(strings.TrimSpace(bodyStr), "\n\n") + for line := range lines { if !strings.HasPrefix(line, "data: ") || strings.HasPrefix(line, "data: [DONE]") { continue } @@ -412,8 +412,8 @@ data: {"type": "message_stop"} var foundToolCallWithArgs bool var finalFinishReason openai.ChatCompletionChoicesFinishReason - lines := strings.Split(strings.TrimSpace(bodyStr), "\n\n") - for _, line := range lines { + lines := strings.SplitSeq(strings.TrimSpace(bodyStr), "\n\n") + for line := range lines { if !strings.HasPrefix(line, "data: ") || strings.Contains(line, "[DONE]") { continue } @@ -495,8 +495,8 @@ data: {"type": "content_block_stop", "index": 0} // 1. Split the stream into individual data chunks // and remove the "data: " prefix. var chunks []openai.ChatCompletionResponseChunk - lines := strings.Split(strings.TrimSpace(bodyStr), "\n\n") - for _, line := range lines { + lines := strings.SplitSeq(strings.TrimSpace(bodyStr), "\n\n") + for line := range lines { if !strings.HasPrefix(line, "data: ") { continue } @@ -593,8 +593,8 @@ data: {"type": "content_block_stop", "index": 0} // 1. Unmarshal all the chunks from the stream response. var chunks []openai.ChatCompletionResponseChunk - lines := strings.Split(strings.TrimSpace(bodyStr), "\n\n") - for _, line := range lines { + lines := strings.SplitSeq(strings.TrimSpace(bodyStr), "\n\n") + for line := range lines { if !strings.HasPrefix(line, "data: ") { continue } @@ -640,10 +640,10 @@ data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text var contentChunk openai.ChatCompletionResponseChunk foundChunk := false - lines := strings.Split(strings.TrimSpace(bodyStr), "\n\n") - for _, line := range lines { - if strings.HasPrefix(line, "data: ") { - jsonBody := strings.TrimPrefix(line, "data: ") + lines := strings.SplitSeq(strings.TrimSpace(bodyStr), "\n\n") + for line := range lines { + if after, ok := strings.CutPrefix(line, "data: "); ok { + jsonBody := after // We only care about the chunk that has the text content. 
if strings.Contains(jsonBody, `"content"`) { err := json.Unmarshal([]byte(jsonBody), &contentChunk) diff --git a/internal/extproc/translator/openai_gcpanthropic_test.go b/internal/extproc/translator/openai_gcpanthropic_test.go index 813761983f..412751d3d3 100644 --- a/internal/extproc/translator/openai_gcpanthropic_test.go +++ b/internal/extproc/translator/openai_gcpanthropic_test.go @@ -422,7 +422,7 @@ func TestMessageTranslation(t *testing.T) { ID: testTool, Type: "tool_use", Name: "get_weather", - Input: map[string]interface{}{"location": "NYC"}, + Input: map[string]any{"location": "NYC"}, }, }, }, @@ -707,7 +707,7 @@ func TestOpenAIToGCPAnthropicTranslator_ResponseError(t *testing.T) { tests := []struct { name string responseHeaders map[string]string - inputBody interface{} + inputBody any expectedOutput openai.Error }{ { @@ -864,10 +864,10 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Function: &openai.FunctionDefinition{ Name: "get_weather", Description: "Get the weather", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, + "properties": map[string]any{ + "location": map[string]any{"type": "string"}, }, }, }, @@ -881,8 +881,8 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Description: anthropic.String("Get the weather"), InputSchema: anthropic.ToolInputSchemaParam{ Type: "object", - Properties: map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, + Properties: map[string]any{ + "location": map[string]any{"type": "string"}, }, }, }, @@ -898,13 +898,13 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Function: &openai.FunctionDefinition{ Name: "get_weather", Description: "Get the weather with a required location", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, - "unit": map[string]interface{}{"type": "string"}, + "properties": map[string]any{ + "location": map[string]any{"type": "string"}, + "unit": map[string]any{"type": "string"}, }, - "required": []interface{}{"location"}, + "required": []any{"location"}, }, }, }, @@ -917,9 +917,9 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Description: anthropic.String("Get the weather with a required location"), InputSchema: anthropic.ToolInputSchemaParam{ Type: "object", - Properties: map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, - "unit": map[string]interface{}{"type": "string"}, + Properties: map[string]any{ + "location": map[string]any{"type": "string"}, + "unit": map[string]any{"type": "string"}, }, Required: []string{"location"}, }, @@ -1063,11 +1063,11 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Function: &openai.FunctionDefinition{ Name: "get_weather", Description: "Get the weather without type", - Parameters: map[string]interface{}{ - "properties": map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, + Parameters: map[string]any{ + "properties": map[string]any{ + "location": map[string]any{"type": "string"}, }, - "required": []interface{}{"location"}, + "required": []any{"location"}, }, }, }, @@ -1080,8 +1080,8 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Description: anthropic.String("Get the weather without type"), InputSchema: anthropic.ToolInputSchemaParam{ Type: "", - Properties: 
map[string]interface{}{ - "location": map[string]interface{}{"type": "string"}, + Properties: map[string]any{ + "location": map[string]any{"type": "string"}, }, Required: []string{"location"}, }, @@ -1098,9 +1098,9 @@ func TestTranslateOpenAItoAnthropicTools(t *testing.T) { Function: &openai.FunctionDefinition{ Name: "get_weather", Description: "Get the weather without properties", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "required": []interface{}{"location"}, + "required": []any{"location"}, }, }, }, @@ -1201,7 +1201,7 @@ func TestFinishReasonTranslation(t *testing.T) { func TestContentTranslationCoverage(t *testing.T) { tests := []struct { name string - inputContent interface{} + inputContent any expectedContent []anthropic.ContentBlockParamUnion expectErr bool }{ diff --git a/internal/extproc/translator/openai_gcpvertexai_test.go b/internal/extproc/translator/openai_gcpvertexai_test.go index 453c7fc390..e5cff7b874 100644 --- a/internal/extproc/translator/openai_gcpvertexai_test.go +++ b/internal/extproc/translator/openai_gcpvertexai_test.go @@ -318,14 +318,14 @@ func TestOpenAIToGCPVertexAITranslatorV1ChatCompletion_RequestBody(t *testing.T) Function: &openai.FunctionDefinition{ Name: "get_weather", Description: "Get the current weather in a given location", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{ + "properties": map[string]any{ + "location": map[string]any{ "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, - "unit": map[string]interface{}{ + "unit": map[string]any{ "type": "string", "enum": []string{"celsius", "fahrenheit"}, }, @@ -381,10 +381,10 @@ func TestOpenAIToGCPVertexAITranslatorV1ChatCompletion_RequestBody(t *testing.T) Function: &openai.FunctionDefinition{ Name: "test_function", Description: "A test function", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "param1": map[string]interface{}{ + "properties": map[string]any{ + "param1": map[string]any{ "type": "string", }, }, @@ -1111,16 +1111,16 @@ Details: [ } func bodyMutTransformer(_ *testing.T) cmp.Option { - return cmp.Transformer("BodyMutationsToBodyBytes", func(bm *extprocv3.BodyMutation) map[string]interface{} { + return cmp.Transformer("BodyMutationsToBodyBytes", func(bm *extprocv3.BodyMutation) map[string]any { if bm == nil { return nil } - var bdy map[string]interface{} + var bdy map[string]any if body, ok := bm.Mutation.(*extprocv3.BodyMutation_Body); ok { if err := json.Unmarshal(body.Body, &bdy); err != nil { // The response body may not be valid JSON for streaming requests. 
- return map[string]interface{}{ + return map[string]any{ "BodyMutation": string(body.Body), } } diff --git a/internal/extproc/translator/openai_openai_test.go b/internal/extproc/translator/openai_openai_test.go index 0c76797e6a..b400f99d64 100644 --- a/internal/extproc/translator/openai_openai_test.go +++ b/internal/extproc/translator/openai_openai_test.go @@ -196,7 +196,7 @@ data: [DONE] `) o := &openAIToOpenAITranslatorV1ChatCompletion{stream: true} - for i := 0; i < len(wholeBody); i++ { + for i := range wholeBody { hm, bm, tokenUsage, err := o.ResponseBody(nil, bytes.NewReader(wholeBody[i:i+1]), false, nil) require.NoError(t, err) require.Nil(t, hm) diff --git a/internal/extproc/translator/translator.go b/internal/extproc/translator/translator.go index 6dc85ac085..ac8c8ca777 100644 --- a/internal/extproc/translator/translator.go +++ b/internal/extproc/translator/translator.go @@ -73,7 +73,7 @@ func setContentLength(headers *extprocv3.HeaderMutation, body []byte) { headers.SetHeaders = append(headers.SetHeaders, &corev3.HeaderValueOption{ Header: &corev3.HeaderValue{ Key: "content-length", - RawValue: []byte(fmt.Sprintf("%d", len(body))), + RawValue: fmt.Appendf(nil, "%d", len(body)), }, }) } diff --git a/internal/extproc/translator/util.go b/internal/extproc/translator/util.go index d4b6353e68..5d57d921c8 100644 --- a/internal/extproc/translator/util.go +++ b/internal/extproc/translator/util.go @@ -100,7 +100,7 @@ func systemMsgToDeveloperMsg(msg openai.ChatCompletionSystemMessageParam) openai // processStop handles the 'stop' parameter which can be a string or a slice of strings. // It normalizes the input into a []*string. -func processStop(data interface{}) ([]*string, error) { +func processStop(data any) ([]*string, error) { if data == nil { return nil, nil } diff --git a/internal/extproc/translator/util_test.go b/internal/extproc/translator/util_test.go index ed6f698ca5..0187afe3f8 100644 --- a/internal/extproc/translator/util_test.go +++ b/internal/extproc/translator/util_test.go @@ -189,7 +189,7 @@ func TestProcessStopToStringPointers(t *testing.T) { testCases := []struct { name string - input interface{} + input any expected []*string expectError bool }{ diff --git a/internal/extproc/watcher_test.go b/internal/extproc/watcher_test.go index a43d1ffa35..75ceca2447 100644 --- a/internal/extproc/watcher_test.go +++ b/internal/extproc/watcher_test.go @@ -15,6 +15,7 @@ import ( "strings" "sync" "testing" + "testing/synctest" "time" "github.com/google/go-cmp/cmp" @@ -75,6 +76,13 @@ func newTestLoggerWithBuffer() (*slog.Logger, *syncBuffer) { } func TestStartConfigWatcher(t *testing.T) { + // Virtualize time so sleeps take no time! + synctest.Test(t, testStartConfigWatcher) +} + +func testStartConfigWatcher(t *testing.T) { + t.Helper() + tmpdir := t.TempDir() path := tmpdir + "/config.yaml" rcv := &mockReceiver{} diff --git a/internal/llmcostcel/cel.go b/internal/llmcostcel/cel.go index 22fba3498e..2189889049 100644 --- a/internal/llmcostcel/cel.go +++ b/internal/llmcostcel/cel.go @@ -61,7 +61,7 @@ func NewProgram(expr string) (prog cel.Program, err error) { // EvaluateProgram evaluates the given CEL program with the given variables. 
func EvaluateProgram(prog cel.Program, modelName, backend string, inputTokens, outputTokens, totalTokens uint32) (uint64, error) { - out, _, err := prog.Eval(map[string]interface{}{ + out, _, err := prog.Eval(map[string]any{ celModelNameKey: modelName, celBackendKey: backend, celInputTokensKey: inputTokens, diff --git a/internal/llmcostcel/cel_test.go b/internal/llmcostcel/cel_test.go index 144828b515..a27c061a9f 100644 --- a/internal/llmcostcel/cel_test.go +++ b/internal/llmcostcel/cel_test.go @@ -6,8 +6,8 @@ package llmcostcel import ( - "sync" "testing" + "testing/synctest" "github.com/stretchr/testify/require" ) @@ -44,16 +44,14 @@ func TestNewProgram(t *testing.T) { t.Run("ensure concurrency safety", func(t *testing.T) { // Ensure that the program can be evaluated concurrently. - var wg sync.WaitGroup - wg.Add(100) - for i := 0; i < 100; i++ { - go func() { - defer wg.Done() - _, err := NewProgram("model == 'cool_model' ? input_tokens * output_tokens : total_tokens") - require.NoError(t, err) - }() - } - wg.Wait() + synctest.Test(t, func(t *testing.T) { + for range 100 { + go func() { + _, err := NewProgram("model == 'cool_model' ? input_tokens * output_tokens : total_tokens") + require.NoError(t, err) + }() + } + }) // synctest.Test waits for all goroutines to complete. }) } @@ -75,16 +73,14 @@ func TestEvaluateProgram(t *testing.T) { require.NoError(t, err) // Ensure that the program can be evaluated concurrently. - var wg sync.WaitGroup - wg.Add(100) - for i := 0; i < 100; i++ { - go func() { - defer wg.Done() - v, err := EvaluateProgram(prog, "cool_model", "cool_backend", 100, 2, 3) - require.NoError(t, err) - require.Equal(t, uint64(200), v) - }() - } - wg.Wait() + synctest.Test(t, func(t *testing.T) { + for range 100 { + go func() { + v, err := EvaluateProgram(prog, "cool_model", "cool_backend", 100, 2, 3) + require.NoError(t, err) + require.Equal(t, uint64(200), v) + }() + } + }) // synctest.Test waits for all goroutines to complete. }) } diff --git a/internal/metrics/chat_completion_metrics_test.go b/internal/metrics/chat_completion_metrics_test.go index 457affdf1c..4a3133ffdb 100644 --- a/internal/metrics/chat_completion_metrics_test.go +++ b/internal/metrics/chat_completion_metrics_test.go @@ -7,6 +7,7 @@ package metrics import ( "testing" + "testing/synctest" "time" "github.com/stretchr/testify/assert" @@ -79,6 +80,13 @@ func TestRecordTokenUsage(t *testing.T) { } func TestRecordTokenLatency(t *testing.T) { + // Virtualize time so sleeps take no time! + synctest.Test(t, testRecordTokenLatency) +} + +func testRecordTokenLatency(t *testing.T) { + t.Helper() + var ( mr = metric.NewManualReader() meter = metric.NewMeterProvider(metric.WithReader(mr)).Meter("test") @@ -118,6 +126,13 @@ func TestRecordTokenLatency(t *testing.T) { } func TestRecordRequestCompletion(t *testing.T) { + // Virtualize time so sleeps take no time! 
+ synctest.Test(t, testRecordRequestCompletion) +} + +func testRecordRequestCompletion(t *testing.T) { + t.Helper() + var ( mr = metric.NewManualReader() meter = metric.NewMeterProvider(metric.WithReader(mr)).Meter("test") diff --git a/internal/tracing/openinference/openai/request_attrs_test.go b/internal/tracing/openinference/openai/request_attrs_test.go index 742a259f03..4d51525735 100644 --- a/internal/tracing/openinference/openai/request_attrs_test.go +++ b/internal/tracing/openinference/openai/request_attrs_test.go @@ -78,14 +78,14 @@ var ( Function: &openai.FunctionDefinition{ Name: "get_current_weather", Description: "Get the current weather in a given location", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{ + "properties": map[string]any{ + "location": map[string]any{ "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, - "unit": map[string]interface{}{ + "unit": map[string]any{ "type": "string", "enum": []string{"celsius", "fahrenheit"}, }, diff --git a/internal/tracing/openinference/test_helpers.go b/internal/tracing/openinference/test_helpers.go index 05e7cbfcd5..b61e6b1a20 100644 --- a/internal/tracing/openinference/test_helpers.go +++ b/internal/tracing/openinference/test_helpers.go @@ -35,7 +35,7 @@ func RequireAttributesEqual(t *testing.T, expected, actual []attribute.KeyValue) valStr := expVal.AsString() if len(valStr) > 0 && (valStr[0] == '{' || valStr[0] == '[') { // Try to parse as JSON, but if it fails, fall back to string comparison. - var expectedJSON interface{} + var expectedJSON any if err := json.Unmarshal([]byte(valStr), &expectedJSON); err == nil { require.JSONEq(t, valStr, attr.Value.AsString(), "attribute %s does not match expected JSON", attr.Key) } else { diff --git a/tests/controller/controller_test.go b/tests/controller/controller_test.go index 23a2f680c9..ad4b009608 100644 --- a/tests/controller/controller_test.go +++ b/tests/controller/controller_test.go @@ -9,10 +9,11 @@ package controller import ( + "cmp" "fmt" "log/slog" "os" - "sort" + "slices" "testing" "time" @@ -464,10 +465,10 @@ func TestBackendSecurityPolicyController(t *testing.T) { require.NoError(t, c.Create(t.Context(), origin)) // Verify that they are the same. backends := eventCh.RequireItemsEventually(t, 2) - sort.Slice(backends, func(i, j int) bool { - backends[i].TypeMeta = metav1.TypeMeta{} - backends[j].TypeMeta = metav1.TypeMeta{} - return backends[i].Name < backends[j].Name + slices.SortFunc(backends, func(a, b *aigv1a1.AIServiceBackend) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, backends) }) @@ -494,10 +495,10 @@ func TestBackendSecurityPolicyController(t *testing.T) { // Verify that they are the same. backends := eventCh.RequireItemsEventually(t, 2) - sort.Slice(backends, func(i, j int) bool { - backends[i].TypeMeta = metav1.TypeMeta{} - backends[j].TypeMeta = metav1.TypeMeta{} - return backends[i].Name < backends[j].Name + slices.SortFunc(backends, func(a, b *aigv1a1.AIServiceBackend) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, backends) }) @@ -537,10 +538,10 @@ func TestBackendSecurityPolicyController(t *testing.T) { } // On deletion, the event should be sent to the event channel to propagate the deletion to the Gateway. 
backends := eventCh.RequireItemsEventually(t, 2) - sort.Slice(backends, func(i, j int) bool { - backends[i].TypeMeta = metav1.TypeMeta{} - backends[j].TypeMeta = metav1.TypeMeta{} - return backends[i].Name < backends[j].Name + slices.SortFunc(backends, func(a, b *aigv1a1.AIServiceBackend) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, backends) return true @@ -626,10 +627,10 @@ func TestAIServiceBackendController(t *testing.T) { // Verify that they are the same. routes := eventCh.RequireItemsEventually(t, 2) - sort.Slice(routes, func(i, j int) bool { - routes[i].TypeMeta = metav1.TypeMeta{} - routes[j].TypeMeta = metav1.TypeMeta{} - return routes[i].Name < routes[j].Name + slices.SortFunc(routes, func(a, b *aigv1a1.AIGatewayRoute) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, routes) }) @@ -645,10 +646,10 @@ func TestAIServiceBackendController(t *testing.T) { require.NoError(t, err) // Verify that they are the same. routes := eventCh.RequireItemsEventually(t, 2) - sort.Slice(routes, func(i, j int) bool { - routes[i].TypeMeta = metav1.TypeMeta{} - routes[j].TypeMeta = metav1.TypeMeta{} - return routes[i].Name < routes[j].Name + slices.SortFunc(routes, func(a, b *aigv1a1.AIGatewayRoute) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, routes) }) @@ -689,10 +690,10 @@ func TestAIServiceBackendController(t *testing.T) { } // On deletion, the event should be sent to the event channel to propagate the deletion to the Gateway. routes := eventCh.RequireItemsEventually(t, 2) - sort.Slice(routes, func(i, j int) bool { - routes[i].TypeMeta = metav1.TypeMeta{} - routes[j].TypeMeta = metav1.TypeMeta{} - return routes[i].Name < routes[j].Name + slices.SortFunc(routes, func(a, b *aigv1a1.AIGatewayRoute) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, routes) return true @@ -738,7 +739,9 @@ func TestSecretController(t *testing.T) { for _, bsp := range originals { require.NoError(t, c.Create(t.Context(), bsp)) } - sort.Slice(originals, func(i, j int) bool { return originals[i].Name < originals[j].Name }) + slices.SortFunc(originals, func(a, b *aigv1a1.BackendSecurityPolicy) int { + return cmp.Compare(a.Name, b.Name) + }) // Start the manager and wait for bsps to be cached before trigger a reconciler. go func() { require.NoError(t, mgr.Start(t.Context())) }() @@ -753,10 +756,10 @@ func TestSecretController(t *testing.T) { // Verify that they are the same. bsps := eventCh.RequireItemsEventually(t, 2) - sort.Slice(bsps, func(i, j int) bool { - bsps[i].TypeMeta = metav1.TypeMeta{} - bsps[j].TypeMeta = metav1.TypeMeta{} - return bsps[i].Name < bsps[j].Name + slices.SortFunc(bsps, func(a, b *aigv1a1.BackendSecurityPolicy) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, bsps) }) @@ -770,10 +773,10 @@ func TestSecretController(t *testing.T) { // Verify that they are the same. 
bsps := eventCh.RequireItemsEventually(t, 2) - sort.Slice(bsps, func(i, j int) bool { - bsps[i].TypeMeta = metav1.TypeMeta{} - bsps[j].TypeMeta = metav1.TypeMeta{} - return bsps[i].Name < bsps[j].Name + slices.SortFunc(bsps, func(a, b *aigv1a1.BackendSecurityPolicy) int { + a.TypeMeta = metav1.TypeMeta{} + b.TypeMeta = metav1.TypeMeta{} + return cmp.Compare(a.Name, b.Name) }) require.Equal(t, originals, bsps) }) diff --git a/tests/e2e/token_ratelimit_test.go b/tests/e2e/token_ratelimit_test.go index a610d18b17..bab6bd5998 100644 --- a/tests/e2e/token_ratelimit_test.go +++ b/tests/e2e/token_ratelimit_test.go @@ -129,7 +129,7 @@ func Test_Examples_TokenRateLimit(t *testing.T) { ResultType string `json:"resultType"` Result []struct { Metric map[string]string `json:"metric"` - Value []interface{} `json:"value"` + Value []any `json:"value"` } } } diff --git a/tests/e2e/traffic_splitting_fallback_test.go b/tests/e2e/traffic_splitting_fallback_test.go index aeaa68591c..2f6303ffa7 100644 --- a/tests/e2e/traffic_splitting_fallback_test.go +++ b/tests/e2e/traffic_splitting_fallback_test.go @@ -35,7 +35,7 @@ func TestTrafficSplittingFallback(t *testing.T) { backendAResponses := 0 backendBResponses := 0 - for i := 0; i < requestCount; i++ { + for range requestCount { fwd := e2elib.RequireNewHTTPPortForwarder(t, e2elib.EnvoyGatewayNamespace, egSelector, e2elib.EnvoyGatewayDefaultServicePort) defer fwd.Kill() @@ -92,7 +92,7 @@ func TestTrafficSplittingFallback(t *testing.T) { backendCounts := make(map[string]int) numRequests := 20 - for i := 0; i < numRequests; i++ { + for range numRequests { req, err := http.NewRequest(http.MethodPost, fwd.Address()+"/v1/chat/completions", strings.NewReader( `{"messages":[{"role":"user","content":"Say this is a test"}],"model":"model-a"}`)) require.NoError(t, err) diff --git a/tests/extproc/build_extproc.go b/tests/extproc/build_extproc.go index 079e78f98c..c11df3e821 100644 --- a/tests/extproc/build_extproc.go +++ b/tests/extproc/build_extproc.go @@ -38,11 +38,6 @@ func buildExtProc() (string, error) { return buildGoBinary("extproc", "./cmd/extproc") } -// BuildExtProcCustom builds a custom extproc binary from the given package path. -func BuildExtProcCustom(binaryNamePrefix, packagePath string) (string, error) { - return buildGoBinary(binaryNamePrefix, packagePath) -} - // buildGoBinary builds a Go binary with the given name and package path. func buildGoBinary(binaryNamePrefix, packagePath string) (string, error) { projectRoot := findProjectRoot() diff --git a/tests/extproc/real_providers_test.go b/tests/extproc/real_providers_test.go index 7d547cc986..d2425e52b2 100644 --- a/tests/extproc/real_providers_test.go +++ b/tests/extproc/real_providers_test.go @@ -231,7 +231,7 @@ func TestWithRealProviders(t *testing.T) { Description: openai.String("Get weather at the given location"), Parameters: openai.FunctionParameters{ "type": "object", - "properties": map[string]interface{}{ + "properties": map[string]any{ "location": map[string]string{ "type": "string", }, @@ -266,7 +266,7 @@ func TestWithRealProviders(t *testing.T) { if toolCall.Function.Name == "get_weather" { getWeatherCalled = true // Extract the location from the function call arguments. 
- var args map[string]interface{} + var args map[string]any if argErr := json.Unmarshal([]byte(toolCall.Function.Arguments), &args); argErr != nil { t.Logf("Error unmarshalling the function arguments: %v", argErr) } diff --git a/tests/extproc/vcr/docker-compose-otel.yaml b/tests/extproc/vcr/docker-compose-otel.yaml index d92f395227..95419bce55 100644 --- a/tests/extproc/vcr/docker-compose-otel.yaml +++ b/tests/extproc/vcr/docker-compose-otel.yaml @@ -8,7 +8,7 @@ volumes: services: extproc-build: - image: golang:1.24.6 + image: golang:1.25 container_name: extproc-build working_dir: /workspace volumes: diff --git a/tests/extproc/vcr/docker-compose.yaml b/tests/extproc/vcr/docker-compose.yaml index 4e48a72d69..8b2b176c02 100644 --- a/tests/extproc/vcr/docker-compose.yaml +++ b/tests/extproc/vcr/docker-compose.yaml @@ -9,7 +9,7 @@ volumes: services: extproc-build: - image: golang:1.24.6 + image: golang:1.25 container_name: extproc-build working_dir: /workspace volumes: @@ -62,7 +62,7 @@ services: command: ["envoy", "-c", "/etc/envoy/envoy.yaml", "--log-level", "debug"] openai-client: - image: golang:1.24.5 + image: golang:1.25 container_name: openai-client profiles: ["test"] env_file: diff --git a/tests/internal/testenvironment/test_environment.go b/tests/internal/testenvironment/test_environment.go index a56a7a91c7..6960eb1572 100644 --- a/tests/internal/testenvironment/test_environment.go +++ b/tests/internal/testenvironment/test_environment.go @@ -196,7 +196,7 @@ func requireRandomPorts(t require.TestingT, count int) []int { ports := make([]int, count) var listeners []net.Listener - for i := 0; i < count; i++ { + for i := range count { lc := net.ListenConfig{} lis, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0") require.NoError(t, err, "failed to listen on random port %d", i) diff --git a/tests/internal/testopenai/chat_requests.go b/tests/internal/testopenai/chat_requests.go index ee55e01e63..bcf7fae69a 100644 --- a/tests/internal/testopenai/chat_requests.go +++ b/tests/internal/testopenai/chat_requests.go @@ -45,14 +45,14 @@ var chatRequests = map[Cassette]*openai.ChatCompletionRequest{ Function: &openai.FunctionDefinition{ Name: "get_current_weather", Description: "Get the current weather in a given location", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{ + "properties": map[string]any{ + "location": map[string]any{ "type": "string", "description": "The city and state, e.g. San Francisco, CA", }, - "unit": map[string]interface{}{ + "unit": map[string]any{ "type": "string", "enum": []string{"celsius", "fahrenheit"}, }, @@ -176,14 +176,14 @@ var chatRequests = map[Cassette]*openai.ChatCompletionRequest{ Function: &openai.FunctionDefinition{ Name: "get_current_weather", Description: "Get the current weather in a given location", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "location": map[string]interface{}{ + "properties": map[string]any{ + "location": map[string]any{ "type": "string", "description": "The city and state, e.g. 
San Francisco, CA", }, - "unit": map[string]interface{}{ + "unit": map[string]any{ "type": "string", "enum": []string{"celsius", "fahrenheit"}, }, @@ -351,10 +351,10 @@ var chatRequests = map[Cassette]*openai.ChatCompletionRequest{ Function: &openai.FunctionDefinition{ Name: "generate_image", Description: "Generate a simple, minimalist image based on the given prompt in sketch style with low quality for cost efficiency.", - Parameters: map[string]interface{}{ + Parameters: map[string]any{ "type": "object", - "properties": map[string]interface{}{ - "prompt": map[string]interface{}{ + "properties": map[string]any{ + "prompt": map[string]any{ "type": "string", "description": "The text description of the image to generate.", }, diff --git a/tests/internal/testopenai/handler.go b/tests/internal/testopenai/handler.go index aee4611b6e..b331bcecb5 100644 --- a/tests/internal/testopenai/handler.go +++ b/tests/internal/testopenai/handler.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "log" + "maps" "net/http" "net/url" "os" @@ -213,9 +214,7 @@ func (h *cassetteHandler) recordNewInteraction(r *http.Request, body []byte, w h defer resp.Body.Close() // Copy the response to the client. - for k, v := range resp.Header { - w.Header()[k] = v - } + maps.Copy(w.Header(), resp.Header) w.WriteHeader(resp.StatusCode) respBody, err := io.ReadAll(resp.Body) @@ -281,7 +280,7 @@ func splitSSEEvents(body string) []string { var current []byte bytes := []byte(body) - for i := 0; i < len(bytes); i++ { + for i := range bytes { current = append(current, bytes[i]) // Check for double newline (event separator). diff --git a/tests/internal/testopenai/handler_test.go b/tests/internal/testopenai/handler_test.go index ababbefed6..539be156c7 100644 --- a/tests/internal/testopenai/handler_test.go +++ b/tests/internal/testopenai/handler_test.go @@ -132,7 +132,7 @@ func TestRecordNewInteraction(t *testing.T) { require.NoError(t, err) // Parse and verify the JSON response. - var respData map[string]interface{} + var respData map[string]any err = json.Unmarshal(respBody, &respData) require.NoError(t, err) require.Equal(t, "chatcmpl-123", respData["id"]) @@ -195,11 +195,11 @@ func TestRecordNewInteraction_ServerError(t *testing.T) { require.Equal(t, http.StatusInternalServerError, resp.StatusCode) respBody, _ := io.ReadAll(resp.Body) - var errResp map[string]interface{} + var errResp map[string]any err = json.Unmarshal(respBody, &errResp) require.NoError(t, err) - require.Equal(t, map[string]interface{}{ - "error": map[string]interface{}{ + require.Equal(t, map[string]any{ + "error": map[string]any{ "message": "Internal server error", "type": "server_error", }, diff --git a/tests/internal/testopenai/vcr_test.go b/tests/internal/testopenai/vcr_test.go index 18b254b0b5..b9f1bc443e 100644 --- a/tests/internal/testopenai/vcr_test.go +++ b/tests/internal/testopenai/vcr_test.go @@ -206,7 +206,7 @@ func TestAfterCaptureHook(t *testing.T) { require.Contains(t, interaction.Request.Headers, "Content-Type") // Request body should be pretty-printed. - var reqBody map[string]interface{} + var reqBody map[string]any err = json.Unmarshal([]byte(interaction.Request.Body), &reqBody) require.NoError(t, err) require.Contains(t, interaction.Request.Body, "\n") // Pretty-printed has newlines. @@ -217,7 +217,7 @@ func TestAfterCaptureHook(t *testing.T) { require.NotContains(t, interaction.Response.Headers, "Content-Encoding") // Removed after decompression. // Response body should be pretty-printed. 
- var respBody map[string]interface{} + var respBody map[string]any err = json.Unmarshal([]byte(interaction.Response.Body), &respBody) require.NoError(t, err) require.Contains(t, interaction.Response.Body, "\n") // Pretty-printed has newlines. diff --git a/tests/internal/testopeninference/assertions.go b/tests/internal/testopeninference/assertions.go index be34806f16..90d3d4b4a3 100644 --- a/tests/internal/testopeninference/assertions.go +++ b/tests/internal/testopeninference/assertions.go @@ -6,14 +6,15 @@ package testopeninference import ( + "cmp" "encoding/json" "reflect" "regexp" - "sort" + "slices" "strings" "testing" - "github.com/google/go-cmp/cmp" + gocmp "github.com/google/go-cmp/cmp" commonv1 "go.opentelemetry.io/proto/otlp/common/v1" tracev1 "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/protobuf/proto" @@ -44,7 +45,7 @@ func RequireSpanEqual(t testing.TB, expected, actual *tracev1.Span) { normalizeSpanForComparison(expectedCopy) normalizeSpanForComparison(actualCopy) - if diff := cmp.Diff(expectedCopy, actualCopy, protocmp.Transform()); diff != "" { + if diff := gocmp.Diff(expectedCopy, actualCopy, protocmp.Transform()); diff != "" { t.Fatalf("spans are not equal (-expected +actual):\n%s", diff) } } @@ -243,7 +244,7 @@ func normalizeErrorMessage(s string) string { // sortAttributes sorts key-value pairs by key. func sortAttributes(attrs []*commonv1.KeyValue) { - sort.Slice(attrs, func(i, j int) bool { - return attrs[i].Key < attrs[j].Key + slices.SortFunc(attrs, func(a, b *commonv1.KeyValue) int { + return cmp.Compare(a.Key, b.Key) }) } diff --git a/tests/internal/testopeninference/assertions_test.go b/tests/internal/testopeninference/assertions_test.go index 55b7fe81fa..4468b6ffc7 100644 --- a/tests/internal/testopeninference/assertions_test.go +++ b/tests/internal/testopeninference/assertions_test.go @@ -323,7 +323,7 @@ func (m *mockT) Helper() { // No-op for testing. } -func (m *mockT) Fatalf(format string, args ...interface{}) { +func (m *mockT) Fatalf(format string, args ...any) { m.failed = true m.errorMsg = strings.TrimSpace(fmt.Sprintf(format, args...)) } diff --git a/tests/internal/testupstreamlib/testupstream/main.go b/tests/internal/testupstreamlib/testupstream/main.go index e75cec2ed5..c375d16c66 100644 --- a/tests/internal/testupstreamlib/testupstream/main.go +++ b/tests/internal/testupstreamlib/testupstream/main.go @@ -92,7 +92,7 @@ func handler(w http.ResponseWriter, r *http.Request) { logger.Println("expected headers", string(expectedHeaders)) // Comma separated key-value pairs. - for _, kv := range bytes.Split(expectedHeaders, []byte(",")) { + for kv := range bytes.SplitSeq(expectedHeaders, []byte(",")) { parts := bytes.SplitN(kv, []byte(":"), 2) if len(parts) != 2 { logger.Println("invalid header key-value pair", string(kv)) @@ -122,7 +122,7 @@ func handler(w http.ResponseWriter, r *http.Request) { logger.Println("non-expected headers", string(nonExpectedHeaders)) // Comma separated key-value pairs. - for _, kv := range bytes.Split(nonExpectedHeaders, []byte(",")) { + for kv := range bytes.SplitSeq(nonExpectedHeaders, []byte(",")) { key := string(kv) if r.Header.Get(key) != "" { logger.Printf("unexpected header %q presence with value %q\n", key, r.Header.Get(key)) @@ -219,7 +219,7 @@ func handler(w http.ResponseWriter, r *http.Request) { logger.Println("response headers", string(responseHeaders)) // Comma separated key-value pairs. 
- for _, kv := range bytes.Split(responseHeaders, []byte(",")) { + for kv := range bytes.SplitSeq(responseHeaders, []byte(",")) { parts := bytes.SplitN(kv, []byte(":"), 2) if len(parts) != 2 { logger.Println("invalid header key-value pair", string(kv)) @@ -263,9 +263,9 @@ func handler(w http.ResponseWriter, r *http.Request) { // we treat it as a stream of pre-formatted "raw" SSE events. Otherwise, we treat it // as a simple line-by-line stream that needs to be formatted. if bytes.Contains(expResponseBody, []byte("\n\n")) { - eventBlocks := bytes.Split(expResponseBody, []byte("\n\n")) + eventBlocks := bytes.SplitSeq(expResponseBody, []byte("\n\n")) - for _, block := range eventBlocks { + for block := range eventBlocks { // Skip any empty blocks that can result from splitting. if len(bytes.TrimSpace(block)) == 0 { continue @@ -288,9 +288,9 @@ func handler(w http.ResponseWriter, r *http.Request) { } else { logger.Println("detected line-by-line stream, formatting as SSE") - lines := bytes.Split(expResponseBody, []byte("\n")) + lines := bytes.SplitSeq(expResponseBody, []byte("\n")) - for _, line := range lines { + for line := range lines { if len(line) == 0 { continue } @@ -326,7 +326,7 @@ func handler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(status) e := eventstream.NewEncoder() - for _, line := range bytes.Split(expResponseBody, []byte("\n")) { + for line := range bytes.SplitSeq(expResponseBody, []byte("\n")) { // Write each line as a chunk with AWS Event Stream format. if len(line) == 0 { continue diff --git a/tests/internal/testupstreamlib/testupstream/main_test.go b/tests/internal/testupstreamlib/testupstream/main_test.go index 1242d9e291..6ff638451b 100644 --- a/tests/internal/testupstreamlib/testupstream/main_test.go +++ b/tests/internal/testupstreamlib/testupstream/main_test.go @@ -60,7 +60,7 @@ func Test_main(t *testing.T) { require.Equal(t, http.StatusOK, response.StatusCode) reader := bufio.NewReader(response.Body) - for i := 0; i < 5; i++ { + for i := range 5 { dataLine, err := reader.ReadString('\n') require.NoError(t, err) require.Equal(t, fmt.Sprintf("data: %d\n", i+1), dataLine) @@ -328,7 +328,7 @@ func Test_main(t *testing.T) { require.Equal(t, http.StatusOK, response.StatusCode) decoder := eventstream.NewDecoder() - for i := 0; i < 5; i++ { + for i := range 5 { var message eventstream.Message message, err = decoder.Decode(response.Body, nil) require.NoError(t, err) @@ -589,7 +589,7 @@ func Test_main(t *testing.T) { require.Equal(t, http.StatusOK, response.StatusCode) reader := bufio.NewReader(response.Body) - for i := 0; i < 2; i++ { + for i := range 2 { dataLine, err := reader.ReadString('\n') require.NoError(t, err) require.Equal(t, fmt.Sprintf("data: %d\n", i+1), dataLine)
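
Notes on the recurring rewrites in this patch, each illustrated with a small self-contained sketch rather than code copied from the repository.

The map[string]interface{} -> map[string]any changes that run through the translator, tracing, and testopenai files are purely cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so the two spellings denote the identical type. A minimal illustration (variable names here are ours, not from the patch):

package example

// any is an alias, not a new type: both variables below have the identical
// type, so the assignment needs no conversion.
var legacy map[string]interface{}
var modern map[string]any = legacy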
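
The watcher, CEL, and metrics tests switch to testing/synctest, which is a stable API as of Go 1.25 (hence the go 1.25 / golang:1.25 bumps). A sketch of the two properties those tests rely on, written against the documented API rather than taken from this repository: time inside the bubble is virtual, and synctest.Test returns only after every goroutine started inside the bubble has exited, which is what lets the concurrency tests drop their sync.WaitGroup.

package example

import (
	"testing"
	"testing/synctest"
	"time"
)

func TestSynctestSketch(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now()
		done := make(chan struct{})
		go func() {
			// Virtual clock: this sleep advances fake time, not wall-clock time.
			time.Sleep(5 * time.Second)
			close(done)
		}()
		<-done
		// The bubble's clock advances deterministically, so the elapsed
		// virtual time is exactly the sleep duration.
		if d := time.Since(start); d != 5*time.Second {
			t.Fatalf("expected 5s of virtual time, got %v", d)
		}
	}) // Returns only once the goroutine above has exited.
}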
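
The controller and OpenInference test changes replace sort.Slice with slices.SortFunc plus cmp.Compare. The comparator contract changes from a less-than bool to a three-way int; a stand-alone sketch with a hypothetical item type, not one of the project's CRD types:

package example

import (
	"cmp"
	"fmt"
	"slices"
)

type item struct{ Name string }

func main() {
	items := []item{{"b"}, {"a"}, {"c"}}
	// cmp.Compare returns a negative, zero, or positive int, which is exactly
	// the contract slices.SortFunc expects from its comparator.
	slices.SortFunc(items, func(a, b item) int {
		return cmp.Compare(a.Name, b.Name)
	})
	fmt.Println(items) // [{a} {b} {c}]
}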
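
In tests/internal/testopenai/handler.go the manual header-copy loop becomes maps.Copy. The semantics match the removed loop: every key in the source map is written into the destination, overwriting existing entries and leaving unrelated destination keys alone. A sketch with plain http.Header values:

package example

import (
	"fmt"
	"maps"
	"net/http"
)

func main() {
	dst := http.Header{"X-Existing": {"keep"}}
	src := http.Header{"Content-Type": {"application/json"}}
	// Equivalent to: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)
	fmt.Println(dst.Get("Content-Type"), dst.Get("X-Existing")) // application/json keep
}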
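
The testupstream handler swaps bytes.Split for bytes.SplitSeq (available since Go 1.24). SplitSeq returns an iterator that yields each sub-slice lazily instead of allocating the whole [][]byte up front, so the loop bodies stay unchanged and the rewrite is mechanical. A small sketch mirroring the header-parsing loops above:

package example

import (
	"bytes"
	"fmt"
)

func main() {
	expectedHeaders := []byte("x-a:1,x-b:2")
	// Range over the iterator; no intermediate slice of slices is built.
	for kv := range bytes.SplitSeq(expectedHeaders, []byte(",")) {
		parts := bytes.SplitN(kv, []byte(":"), 2)
		if len(parts) != 2 {
			continue
		}
		fmt.Printf("%s=%s\n", parts[0], parts[1])
	}
}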
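
Two smaller modernizations also appear repeatedly: counted loops become range-over-int (for i := range n, or for range n when the index is unused), and []byte(fmt.Sprintf(...)) becomes fmt.Appendf(nil, ...), which formats directly into a byte slice without an intermediate string. A combined sketch:

package example

import "fmt"

func main() {
	// Range over an integer: i runs 0..4, same as the classic three-clause loop.
	for i := range 5 {
		// fmt.Appendf appends the formatted output to the given slice; passing
		// nil allocates just the []byte result.
		b := fmt.Appendf(nil, "data: %d\n", i+1)
		fmt.Print(string(b))
	}
}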