Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
71 changes: 71 additions & 0 deletions gateway/it/config_manager.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
package it

import (
"context"
"fmt"
"log"
"strings"
"sync"

"github.com/cucumber/godog"
)

// ConfigTagPrefix is the Gherkin tag prefix (e.g. "@config-tracing") used to
// select a gateway configuration profile for a scenario; the suffix after the
// prefix is the profile name looked up in the ConfigProfileRegistry.
const ConfigTagPrefix = "@config-"

// GatewayConfigManager handles configuration switching for the gateway
type GatewayConfigManager struct {
	registry       *ConfigProfileRegistry // known configuration profiles, keyed by name
	composeManager *ComposeManager        // used to restart gateway-controller with new env vars
	currentProfile string                 // name of the profile currently applied to the gateway
	mu             sync.Mutex             // guards currentProfile and serializes restarts
}

// NewGatewayConfigManager builds a config manager bound to the given compose
// manager. The gateway stack is assumed to be running the "default" profile
// when the manager is created.
func NewGatewayConfigManager(cm *ComposeManager) *GatewayConfigManager {
	mgr := &GatewayConfigManager{
		registry:       NewConfigProfileRegistry(),
		composeManager: cm,
	}
	// Assume default on startup.
	mgr.currentProfile = "default"
	return mgr
}

// EnsureConfig inspects the scenario's tags and, when they request a
// configuration profile different from the one currently applied, restarts
// the gateway-controller with that profile's environment variables.
// Scenarios without a @config-* tag run on the "default" profile.
func (m *GatewayConfigManager) EnsureConfig(ctx context.Context, sc *godog.Scenario) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	required := m.extractConfigTag(sc)
	if required == "" {
		required = "default"
	}

	// Already on the requested profile: no restart needed.
	if required == m.currentProfile {
		return nil
	}

	log.Printf("Switching gateway config from '%s' to '%s'...", m.currentProfile, required)

	profile, found := m.registry.Get(required)
	if !found {
		return fmt.Errorf("unknown config profile: %s", required)
	}

	// Restart gateway-controller with the profile's env vars.
	if err := m.composeManager.RestartGatewayController(ctx, profile.EnvVars); err != nil {
		return fmt.Errorf("failed to restart gateway with profile %s: %w", required, err)
	}

	m.currentProfile = required
	log.Printf("Switched to '%s' profile successfully", required)
	return nil
}

// extractConfigTag returns the suffix of the first scenario tag that begins
// with ConfigTagPrefix, or the empty string when no such tag is present.
func (m *GatewayConfigManager) extractConfigTag(sc *godog.Scenario) string {
	for _, t := range sc.Tags {
		if !strings.HasPrefix(t.Name, ConfigTagPrefix) {
			continue
		}
		return strings.TrimPrefix(t.Name, ConfigTagPrefix)
	}
	return ""
}
74 changes: 74 additions & 0 deletions gateway/it/config_profiles.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
package it

// ConfigProfile defines a named configuration set for the gateway
type ConfigProfile struct {
	Name        string            // unique profile name, matched against @config-<name> scenario tags
	EnvVars     map[string]string // environment variables applied when this profile is activated
	Description string            // human-readable summary of what the profile configures
}

// ConfigProfileRegistry manages the available configuration profiles
type ConfigProfileRegistry struct {
	profiles       map[string]*ConfigProfile // registered profiles, keyed by ConfigProfile.Name
	defaultProfile string                    // NOTE(review): set to "default" in the constructor but never read here — confirm it is needed
}

// NewConfigProfileRegistry creates a registry pre-populated with the standard
// profiles: default, debug, memory, and tracing. Each profile carries a full
// environment-variable set (not a delta) so that switching between profiles
// never leaves stale variables behind.
func NewConfigProfileRegistry() *ConfigProfileRegistry {
	registry := &ConfigProfileRegistry{
		profiles:       make(map[string]*ConfigProfile),
		defaultProfile: "default",
	}

	// Register standard profiles.
	for _, p := range []*ConfigProfile{
		{
			Name: "default",
			EnvVars: map[string]string{
				"GATEWAY_LOGGING_LEVEL": "info",
				"GATEWAY_STORAGE_TYPE":  "sqlite",
			},
			Description: "Standard configuration using SQLite and Info logging",
		},
		{
			Name: "debug",
			EnvVars: map[string]string{
				"GATEWAY_LOGGING_LEVEL": "debug",
				"GATEWAY_STORAGE_TYPE":  "sqlite",
			},
			Description: "Debug configuration enabling verbose logging",
		},
		{
			Name: "memory",
			EnvVars: map[string]string{
				"GATEWAY_LOGGING_LEVEL": "info",
				"GATEWAY_STORAGE_TYPE":  "memory",
			},
			Description: "In-memory storage configuration (non-persistent)",
		},
		{
			Name: "tracing",
			EnvVars: map[string]string{
				"GATEWAY_LOGGING_LEVEL":   "info",
				"GATEWAY_STORAGE_TYPE":    "memory",
				"GATEWAY_TRACING_ENABLED": "true",
			},
			Description: "Configuration with OpenTelemetry tracing enabled",
		},
	} {
		registry.Register(p)
	}

	return registry
}

// Register adds a profile to the registry
// (a profile with the same Name silently overwrites any prior registration).
func (r *ConfigProfileRegistry) Register(profile *ConfigProfile) {
	r.profiles[profile.Name] = profile
}

// Get looks up a profile by name; the boolean reports whether the name
// was registered.
func (r *ConfigProfileRegistry) Get(name string) (*ConfigProfile, bool) {
	if p, ok := r.profiles[name]; ok {
		return p, true
	}
	return nil, false
}
13 changes: 13 additions & 0 deletions gateway/it/docker-compose.test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,19 @@ services:
networks:
- it-gateway-network

otel-collector:
image: otel/opentelemetry-collector-contrib:0.88.0
container_name: it-otel-collector
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./otel-config.yaml:/etc/otel-collector-config.yaml:ro
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
networks:
- it-gateway-network


router:
container_name: it-router
image: ghcr.io/wso2/api-platform/gateway-router:0.1.0-SNAPSHOT
Expand Down
12 changes: 12 additions & 0 deletions gateway/it/features/tracing.feature
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
Feature: Distributed Tracing
As a developer
I want to ensure that API requests are traced
So that I can observe the system behavior

@config-tracing
Scenario: API invocation generates a trace
Given the Gateway is running with tracing enabled
And I have a valid API Key for the "Sales API"
When I send a GET request to "http://localhost:8080/api/v1/sales/orders"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Find and examine docker-compose.test.yaml
find . -name "docker-compose.test.yaml" -o -name "docker-compose*.yaml" | head -20

Repository: wso2/api-platform

Length of output: 265


🏁 Script executed:

#!/bin/bash
# Find the tracing.feature file and examine it
find . -name "tracing.feature" -type f | head -20

Repository: wso2/api-platform

Length of output: 98


🏁 Script executed:

#!/bin/bash
# Search for port definitions in key files
rg -n "9090|8080" --max-count=20

Repository: wso2/api-platform

Length of output: 50375


🏁 Script executed:

#!/bin/bash
# Read docker-compose.test.yaml to check port mappings
cat gateway/it/docker-compose.test.yaml | grep -A5 -B5 "ports:"

Repository: wso2/api-platform

Length of output: 1616


🏁 Script executed:

#!/bin/bash
# Read tracing.feature to see line 10 and surrounding context
head -20 gateway/it/features/tracing.feature | cat -n

Repository: wso2/api-platform

Length of output: 657


🏁 Script executed:

#!/bin/bash
# Look for port constants in setup.go or similar
find gateway -name "setup.go" -o -name "config*.go" | head -5

Repository: wso2/api-platform

Length of output: 235


🏁 Script executed:

#!/bin/bash
# Check setup.go to understand how the test harness is configured
cat gateway/it/setup.go | grep -A5 -B5 "9090\|8080" | head -50

Repository: wso2/api-platform

Length of output: 795


Port 9090 is for the gateway-controller REST API, not for API requests.

The request should use http://localhost:8080/api/v1/sales/orders instead. The router service exposes HTTP traffic on port 8080, while port 9090 is reserved for the gateway-controller management API.

🤖 Prompt for AI Agents
gateway/it/features/tracing.feature around line 10: the test sends the GET
request to http://localhost:9090/api/v1/sales/orders but port 9090 is for the
gateway-controller management API; update the request URL to use port 8080
(http://localhost:8080/api/v1/sales/orders) so the test targets the router
service HTTP endpoint.

Then the response status code should be 200
And I should see a trace for "Sales API" in the OpenTelemetry collector logs
16 changes: 16 additions & 0 deletions gateway/it/otel-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
receivers:
otlp:
protocols:
grpc:
http:

exporters:
  # "loglevel" is deprecated in the collector's logging exporter;
  # "verbosity: detailed" is the replacement configuration.
  logging:
    verbosity: detailed
Comment on lines +7 to +9
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Replace deprecated loglevel with verbosity configuration.

The loglevel configuration key is deprecated in favor of verbosity. Update the logging exporter configuration to use verbosity: detailed instead of loglevel: debug.

🤖 Prompt for AI Agents
In gateway/it/otel-config.yaml around lines 7 to 9, the logging exporter uses
the deprecated key `loglevel: debug`; replace that key with `verbosity:
detailed` so the exporter configuration reads the new `verbosity` setting
(ensure indentation and YAML formatting remain unchanged and no other keys are
modified).


service:
pipelines:
traces:
receivers: [otlp]
processors: []
exporters: [logging]
134 changes: 120 additions & 14 deletions gateway/it/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"os/exec"
"os/signal"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
Expand All @@ -46,7 +47,7 @@ const (
HealthCheckInterval = 2 * time.Second

// GatewayControllerPort is the REST API port for gateway-controller
GatewayControllerPort = "9090"
GatewayControllerPort = "9111"

// RouterPort is the HTTP traffic port for the router
RouterPort = "8080"
Expand Down Expand Up @@ -139,6 +140,9 @@ func (cm *ComposeManager) Start() error {
return fmt.Errorf("failed to start docker compose: %w", err)
}

// Stream logs in background
cm.StreamLogs()

log.Println("Docker Compose services started, waiting for health checks...")

// Wait for services to be healthy (additional verification)
Expand All @@ -156,6 +160,87 @@ func (cm *ComposeManager) Start() error {
return nil
}

// RestartGatewayController restarts the gateway-controller service with specific environment variables
func (cm *ComposeManager) RestartGatewayController(ctx context.Context, envVars map[string]string) error {
// Project name is "gateway-it", service is "gateway-controller".
// Default naming is usually project-service-1 or project_service_1.
// However, explicit container_name might be set in compose file.
// Given the context, we should rely on docker compose commands rather than assuming container name for 'docker stop/rm'
// BUT the prompt explicitly used "docker stop containerName" and "docker rm containerName".
// I should check if I can just use `docker compose stop` and `docker compose up`.
// The prompt suggested:
// exec.CommandContext(ctx, "docker", "stop", containerName).Run()
// exec.CommandContext(ctx, "docker", "rm", containerName).Run()
// args := []string{"compose", "-f", cm.composeFile, "-p", cm.projectName, "up", "-d", "gateway-controller"}

// I'll stick to 'docker compose' commands to be safe with names.

log.Println("Restarting gateway-controller with new configuration...")

// Stop and remove the service container
stopCmd := execCommandContext(ctx, "docker", "compose", "-f", cm.composeFile, "-p", cm.projectName, "stop", "gateway-controller")
if err := stopCmd.Run(); err != nil {
return fmt.Errorf("failed to stop gateway-controller: %w", err)
}

// Force remove the container by declared name to avoid conflicts
// We use direct docker rm because compose rm sometimes doesn't clear the name reservation fast enough
// or behaves differently with static container_names.
rmCmd := execCommandContext(ctx, "docker", "rm", "-f", "it-gateway-controller")
// We ignore error here because if it doesn't exist, that's fine.
_ = rmCmd.Run()

// Start with new env vars
args := []string{"compose", "-f", cm.composeFile, "-p", cm.projectName, "up", "-d", "gateway-controller"}
cmd := execCommandContext(ctx, "docker", args...)

// Copy existing env and append new ones
cmd.Env = os.Environ()
for k, v := range envVars {
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
log.Printf("Setting env: %s=%s", k, v)
}

output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("failed to start gateway-controller: %w\nOutput: %s", err, string(output))
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Environment variables set via cmd.Env will not propagate to the container.

Setting environment variables on the docker compose up command only affects the Docker CLI process, not the container itself. Docker Compose reads environment variables for the container from the compose file's environment section or from .env files.

To dynamically set container environment variables, you need to either:

  1. Use docker compose run with -e flags
  2. Modify the compose file dynamically
  3. Use an .env file that gets updated
  4. Use docker run directly with -e flags
🔎 Possible fix using docker run

One approach is to use docker run directly instead of docker compose up:

// Build args with environment variables
args := []string{"run", "-d", "--name", "it-gateway-controller",
    "--network", "gateway-it_it-gateway-network",
    // Add other necessary mounts and ports from compose file
}
for k, v := range envVars {
    args = append(args, "-e", fmt.Sprintf("%s=%s", k, v))
}
args = append(args, "ghcr.io/wso2/api-platform/gateway-controller-coverage:0.1.0-SNAPSHOT")

cmd := execCommandContext(ctx, "docker", args...)

However, this requires duplicating the container configuration. A better approach might be to use a templated compose file or environment file.

🤖 Prompt for AI Agents
In gateway/it/setup.go around lines 193-207, environment variables appended to
cmd.Env won't be passed into containers by "docker compose up"; instead create a
temporary .env file containing each "KEY=VALUE" from envVars, update the docker
compose command args to include "--env-file", "-f", cm.composeFile, "-p",
cm.projectName, "up", "-d", "gateway-controller" (removing the manual cmd.Env
population), write the envVars to that file before running the command, ensure
the file is closed and removed afterward (or reused appropriately), and keep
cmd.Env minimal (e.g., os.Environ()) so the Docker CLI inherits normal
environment.


// Wait for health check
return cm.WaitForGatewayControllerHealthy(ctx)
}

// WaitForGatewayControllerHealthy polls the gateway-controller /health
// endpoint every 500ms until it returns 200 OK, the parent ctx is cancelled,
// or an overall 30-second deadline expires. On timeout, the last observed
// failure (if any) is wrapped into the returned error for easier debugging.
func (cm *ComposeManager) WaitForGatewayControllerHealthy(ctx context.Context) error {
	endpoint := fmt.Sprintf("http://localhost:%s/health", GatewayControllerPort)
	client := &http.Client{
		// Per-request cap so one hung connection can't stall a poll iteration.
		Timeout: 2 * time.Second,
	}

	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()

	timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	var lastErr error
	for {
		select {
		case <-timeoutCtx.Done():
			if lastErr != nil {
				return fmt.Errorf("timeout waiting for gateway-controller to be healthy: last error: %w", lastErr)
			}
			return fmt.Errorf("timeout waiting for gateway-controller to be healthy")
		case <-ticker.C:
			// Tie the request to the timeout context so cancellation aborts an
			// in-flight request instead of waiting out the client timeout.
			req, err := http.NewRequestWithContext(timeoutCtx, http.MethodGet, endpoint, nil)
			if err != nil {
				return fmt.Errorf("building health check request: %w", err)
			}

			resp, err := client.Do(req)
			if err != nil {
				lastErr = err
				continue
			}
			resp.Body.Close()

			if resp.StatusCode == http.StatusOK {
				return nil
			}
			lastErr = fmt.Errorf("health endpoint returned status %d", resp.StatusCode)
		}
	}
}

// WaitForHealthy waits for all services to pass health checks
func (cm *ComposeManager) WaitForHealthy(ctx context.Context) error {
services := []struct {
Expand Down Expand Up @@ -245,24 +330,45 @@ func (cm *ComposeManager) Cleanup() {
cm.isShutdown = true

log.Println("Cleaning up Docker Compose services...")

// Cancel context to stop any ongoing operations
cm.cancel()

// Stop signal handling
signal.Stop(cm.signalChan)
close(cm.signalChan)
cleanupCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

// Run docker compose down with cleanup context
cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cleanupCancel()
if err := cm.compose.Down(cleanupCtx, tc.RemoveOrphans(true), tc.RemoveImagesLocal); err != nil {
log.Printf("Failed to stop docker compose: %v", err)
}
})
}

if err := cm.compose.Down(cleanupCtx, tc.RemoveOrphans(true), tc.RemoveVolumes(true)); err != nil {
log.Printf("Warning: error during cleanup: %v", err)
// StreamLogs launches a background goroutine that follows the compose
// project's container logs and mirrors them onto this process's stdout and
// stderr. The goroutine exits when cm.ctx is cancelled or the log command
// terminates.
func (cm *ComposeManager) StreamLogs() {
	stream := func() {
		log.Println("Streaming logs from containers...")
		logsCmd := execCommandContext(cm.ctx, "docker", "compose", "-f", cm.composeFile, "-p", cm.projectName, "logs", "-f")
		logsCmd.Stdout = os.Stdout
		logsCmd.Stderr = os.Stderr

		err := logsCmd.Run()
		if err == nil {
			return
		}
		// Context cancellation is a normal shutdown, not worth logging.
		if cm.ctx.Err() == nil {
			log.Printf("Background log streaming stopped: %v", err)
		}
	}
	go stream()
}

log.Println("Cleanup complete")
})
// CheckLogsForText reports whether the named container's logs contain the
// given text. The compose file pins explicit container_name values (e.g.
// it-otel-collector), so plain `docker logs <name>` is addressable here.
func (cm *ComposeManager) CheckLogsForText(ctx context.Context, containerName, text string) (bool, error) {
	logsCmd := execCommandContext(ctx, "docker", "logs", containerName)
	out, err := logsCmd.CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("failed to get logs for %s: %w", containerName, err)
	}

	found := strings.Contains(string(out), text)
	return found, nil
}

// CheckDockerAvailable verifies that Docker is running and accessible
Expand All @@ -287,7 +393,7 @@ func CheckPortsAvailable() error {
"8443", // HTTPS
EnvoyAdminPort, // 9901
"9002", // Policy engine
"5000", // Sample backend
"5050", // Sample backend
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Port check mismatch with docker-compose.test.yaml.

The port check uses 5050 but docker-compose.test.yaml shows sample-backend on port 5000:5000. This inconsistency will cause false port conflict detection.

🔎 Suggested fix
-		"5050",                // Sample backend
+		"5000",                // Sample backend
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
"5050", // Sample backend
"5000", // Sample backend
🤖 Prompt for AI Agents
In gateway/it/setup.go around line 396, the hard-coded sample backend port is
set to "5050" but docker-compose.test.yaml maps the sample-backend to 5000:5000;
update the port check to use "5000" (or better, derive the expected port from
the docker-compose/test config or an environment variable) so the test's port
conflict detection matches docker-compose.test.yaml.

"18000", // xDS gRPC
"18001", // xDS gRPC
}
Expand Down
Loading
Loading