diff --git a/operator/e2e/cmd/setup-debug-cluster/README.md b/operator/e2e/cmd/setup-debug-cluster/README.md new file mode 100644 index 00000000..069a5254 --- /dev/null +++ b/operator/e2e/cmd/setup-debug-cluster/README.md @@ -0,0 +1,32 @@ +# setup-debug-cluster + +Creates a local K3D cluster identical to the one used in E2E tests, with Grove operator and Kai scheduler pre-installed. + +## Usage + +```bash +# From this directory +go run . + +# Or build and run +go build +./setup-debug-cluster +``` + +## Options + +``` +--name Cluster name (default: "grove-e2e-cluster") +--worker-nodes Number of worker nodes (default: 30) +--verbose, -v Enable verbose logging +--quiet, -q Suppress non-error output +--help Show all options +``` + +## Teardown + +Press `Ctrl+C` if running interactively, or: + +```bash +k3d cluster delete grove-e2e-cluster +``` diff --git a/operator/e2e/cmd/setup-debug-cluster/main.go b/operator/e2e/cmd/setup-debug-cluster/main.go new file mode 100644 index 00000000..3ea69df3 --- /dev/null +++ b/operator/e2e/cmd/setup-debug-cluster/main.go @@ -0,0 +1,344 @@ +// /* +// Copyright 2025 The Grove Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// */ + +package main + +import ( + "context" + "fmt" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "syscall" + + "github.com/ai-dynamo/grove/operator/e2e/setup" + "github.com/ai-dynamo/grove/operator/e2e/utils" + + "github.com/alecthomas/kong" + "golang.org/x/term" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// CLI defines the command-line interface using Kong struct tags. +// All cluster configuration options default to the values from setup.DefaultClusterConfig() +// to ensure consistency with e2e tests. +type CLI struct { + // Cluster configuration overrides (defaults come from setup.DefaultClusterConfig()) + Name *string `name:"name" help:"Name of the K3D cluster"` + ControlPlaneNodes *int `name:"control-plane-nodes" help:"Number of control plane nodes"` + WorkerNodes *int `name:"worker-nodes" help:"Number of worker nodes"` + K3sImage *string `name:"k3s-image" help:"K3s Docker image to use"` + APIPort *string `name:"api-port" help:"Port on host to expose Kubernetes API"` + LBPort *string `name:"lb-port" help:"Load balancer port mapping (host:container)"` + WorkerMemory *string `name:"worker-memory" help:"Memory allocation for worker nodes"` + EnableRegistry *bool `name:"enable-registry" help:"Enable built-in Docker registry" negatable:""` + RegistryPort *string `name:"registry-port" help:"Port for the Docker registry"` + + // Deployment options + SkaffoldPath string `name:"skaffold-path" help:"Path to skaffold.yaml (defaults to repo root)" type:"path"` + + // Test images + TestImages []string `name:"test-images" help:"Test images to pre-load into registry" default:"nginx:alpine-slim"` + + // Logging + Verbose bool `name:"verbose" short:"v" help:"Enable verbose logging"` + Quiet bool `name:"quiet" short:"q" help:"Suppress non-error output"` +} + +func main() { + var cli CLI + ctx := kong.Parse(&cli, + kong.Name("setup-debug-cluster"), + kong.Description("Create a K3D cluster with 
Grove operator for local development and debugging.\n\n"+ + "This command handles all setup steps including:\n"+ + " - Creating the K3D cluster\n"+ + " - Setting up a Docker registry\n"+ + " - Pre-pulling and caching images\n"+ + " - Deploying Grove operator via Skaffold\n"+ + " - Installing Kai scheduler via Helm"), + kong.UsageOnError(), + ) + + if err := run(&cli); err != nil { + ctx.FatalIfErrorf(err) + } +} + +// run executes the main logic for setting up the debug cluster. +func run(cli *CLI) error { + // Set up logging + logger := utils.NewTestLogger(getLogLevel(cli)) + + // Start with the default cluster configuration + // This includes all node labels and taints required for Grove e2e testing + cfg := setup.DefaultClusterConfig() + + // Apply CLI overrides if provided + if cli.Name != nil { + cfg.Name = *cli.Name + } + if cli.ControlPlaneNodes != nil { + cfg.ControlPlaneNodes = *cli.ControlPlaneNodes + } + if cli.WorkerNodes != nil { + cfg.WorkerNodes = *cli.WorkerNodes + } + if cli.K3sImage != nil { + cfg.Image = *cli.K3sImage + } + if cli.APIPort != nil { + cfg.HostPort = *cli.APIPort + } + if cli.LBPort != nil { + cfg.LoadBalancerPort = *cli.LBPort + } + if cli.WorkerMemory != nil { + cfg.WorkerMemory = *cli.WorkerMemory + } + if cli.EnableRegistry != nil { + cfg.EnableRegistry = *cli.EnableRegistry + } + if cli.RegistryPort != nil { + cfg.RegistryPort = *cli.RegistryPort + } + + // Determine skaffold path + skaffoldPath := cli.SkaffoldPath + if skaffoldPath == "" { + // Find skaffold.yaml relative to git repo root + skaffoldPath = findSkaffoldYAML() + if skaffoldPath == "" { + return fmt.Errorf("could not find skaffold.yaml. Are you running from within the grove repo? 
You can also specify the path with --skaffold-path") + } + logger.Debugf("Using skaffold.yaml from: %s", skaffoldPath) + } else { + // Validate specified skaffold path exists + if _, err := os.Stat(skaffoldPath); err != nil { + return fmt.Errorf("skaffold file not found at %s: %w", skaffoldPath, err) + } + } + + // Create context that cancels on SIGINT/SIGTERM + runCtx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + // Print configuration + if !cli.Quiet { + printConfiguration(&cfg, logger) + } + + // Set up the cluster + logger.Info("🚀 Setting up K3D cluster with Grove operator...") + + _, cleanup, err := setup.SetupCompleteK3DCluster(runCtx, cfg, skaffoldPath, logger) + if err != nil { + logger.Errorf("Failed to setup K3D cluster: %v", err) + if cleanup != nil { + logger.Info("Running cleanup...") + cleanup() + } + return fmt.Errorf("failed to setup K3D cluster: %w", err) + } + + // Setup test images in registry (matching what e2e tests do) + logger.Infof("📦 Pre-loading %d test image(s) to registry...", len(cli.TestImages)) + if err := setup.SetupRegistryTestImages(cfg.RegistryPort, cli.TestImages); err != nil { + logger.Warnf("⚠️ Failed to pre-load test images (you can push them manually): %v", err) + // Don't fail - user can push images manually if needed + } else { + logger.Info("✅ Test images successfully pre-loaded to registry") + } + + // Write kubeconfig to KUBECONFIG env var or default location + kubeconfigPath, err := writeKubeconfig(runCtx, cfg.Name, logger) + if err != nil { + logger.Errorf("Failed to write kubeconfig: %v", err) + logger.Info("Running cleanup...") + cleanup() + return fmt.Errorf("failed to write kubeconfig: %w", err) + } + + // Success message + logger.Info("✅ K3D cluster successfully created!") + logger.Infof("Cluster name: %s", cfg.Name) + logger.Infof("API server: https://localhost:%s", cfg.HostPort) + if cfg.EnableRegistry { + logger.Infof("Docker registry: localhost:%s", 
cfg.RegistryPort) + } + logger.Infof("Kubeconfig written to: %s", kubeconfigPath) + + // Print kubectl config instructions + fmt.Println("\nTo use this cluster:") + if kubeconfigPath != clientcmd.RecommendedHomeFile { + fmt.Printf(" export KUBECONFIG=%s\n", kubeconfigPath) + } + fmt.Printf(" kubectl cluster-info\n\n") + + // Print teardown instructions + fmt.Println("To tear down the cluster:") + fmt.Printf(" k3d cluster delete %s\n\n", cfg.Name) + + // If running interactively, wait for signal + if term.IsTerminal(int(os.Stdin.Fd())) { + fmt.Println("Press Ctrl+C to tear down the cluster...") + <-runCtx.Done() + + logger.Info("Tearing down cluster...") + cleanup() + logger.Info("✅ Cluster teardown complete") + } else { + logger.Info("Cluster is ready. Run 'k3d cluster delete " + cfg.Name + "' to tear it down.") + } + + return nil +} + +// getLogLevel returns the log level based on CLI flags. +func getLogLevel(cli *CLI) utils.LogLevel { + if cli.Quiet { + return utils.ErrorLevel + } + if cli.Verbose { + return utils.DebugLevel + } + return utils.InfoLevel +} + +// printConfiguration logs the cluster configuration. +func printConfiguration(cfg *setup.ClusterConfig, logger *utils.Logger) { + logger.Info("Cluster Configuration:") + logger.Infof(" Name: %s", cfg.Name) + logger.Infof(" Control Plane Nodes: %d", cfg.ControlPlaneNodes) + logger.Infof(" Worker Nodes: %d", cfg.WorkerNodes) + logger.Infof(" K3s Image: %s", cfg.Image) + logger.Infof(" API Port: %s", cfg.HostPort) + logger.Infof(" Load Balancer Port: %s", cfg.LoadBalancerPort) + logger.Infof(" Worker Memory: %s", cfg.WorkerMemory) + if cfg.EnableRegistry { + logger.Infof(" Registry: Enabled (port %s)", cfg.RegistryPort) + } else { + logger.Info(" Registry: Disabled") + } + logger.Info("") +} + +// findSkaffoldYAML finds skaffold.yaml by locating the git repo root. 
+func findSkaffoldYAML() string { + // Use git to find the repo root - works from anywhere in the repo + cmd := exec.Command("git", "rev-parse", "--show-toplevel") + output, err := cmd.Output() + if err != nil { + return "" + } + + repoRoot := strings.TrimSpace(string(output)) + skaffoldPath := filepath.Join(repoRoot, "operator", "skaffold.yaml") + + if _, err := os.Stat(skaffoldPath); err == nil { + return skaffoldPath + } + + return "" +} + +// getKubeconfigPath returns KUBECONFIG env var or ~/.kube/config. +func getKubeconfigPath() string { + if kubeconfigEnv := os.Getenv("KUBECONFIG"); kubeconfigEnv != "" { + return kubeconfigEnv + } + return clientcmd.RecommendedHomeFile +} + +// writeKubeconfig writes the cluster kubeconfig, merging with existing config if present. +func writeKubeconfig(ctx context.Context, clusterName string, logger *utils.Logger) (string, error) { + logger.Debug("📄 Fetching kubeconfig from k3d cluster...") + + // Get kubeconfig from k3d + kubeconfig, err := setup.GetKubeconfig(ctx, clusterName) + if err != nil { + return "", err + } + + // Determine target path + targetPath := getKubeconfigPath() + if targetPath == "" { + return "", fmt.Errorf("could not determine kubeconfig path") + } + + // Ensure directory exists + dir := filepath.Dir(targetPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return "", fmt.Errorf("failed to create kubeconfig directory: %w", err) + } + + // Check if kubeconfig file already exists + var existingConfig *clientcmdapi.Config + if _, err := os.Stat(targetPath); err == nil { + // File exists, load it + logger.Debugf("Loading existing kubeconfig from %s", targetPath) + existingConfig, err = clientcmd.LoadFromFile(targetPath) + if err != nil { + logger.Warnf("Failed to load existing kubeconfig, will overwrite: %v", err) + existingConfig = nil + } + } + + // Merge or use new config + var finalConfig *clientcmdapi.Config + if existingConfig != nil { + // Merge the new cluster config into existing + 
logger.Debug("Merging new cluster config with existing kubeconfig") + finalConfig = mergeKubeconfigs(existingConfig, kubeconfig, clusterName) + } else { + finalConfig = kubeconfig + } + + // Write the kubeconfig + if err := clientcmd.WriteToFile(*finalConfig, targetPath); err != nil { + return "", fmt.Errorf("failed to write kubeconfig to %s: %w", targetPath, err) + } + + logger.Debugf("✓ Kubeconfig written to %s", targetPath) + return targetPath, nil +} + +// mergeKubeconfigs merges a new kubeconfig into an existing one. +func mergeKubeconfigs(existing, new *clientcmdapi.Config, _ string) *clientcmdapi.Config { + merged := existing.DeepCopy() + + // Add/update cluster + for name, cluster := range new.Clusters { + merged.Clusters[name] = cluster + } + + // Add/update auth info + for name, authInfo := range new.AuthInfos { + merged.AuthInfos[name] = authInfo + } + + // Add/update context + for name, context := range new.Contexts { + merged.Contexts[name] = context + // Set the new cluster as current context + merged.CurrentContext = name + } + + return merged +} + diff --git a/operator/e2e/setup/k8s_clusters.go b/operator/e2e/setup/k8s_clusters.go index 000129eb..49c117a9 100644 --- a/operator/e2e/setup/k8s_clusters.go +++ b/operator/e2e/setup/k8s_clusters.go @@ -50,6 +50,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "sigs.k8s.io/yaml" ) @@ -109,18 +110,43 @@ type ClusterConfig struct { RegistryPort string // Port for the Docker registry (e.g., "5001") } -// DefaultClusterConfig returns a sensible default cluster configuration +// DefaultClusterConfig returns the default cluster configuration used by e2e tests. +// This includes all the node labels and taints required for Grove e2e testing. +// The setup-debug-cluster tool and SharedClusterManager both use this as their base config. 
func DefaultClusterConfig() ClusterConfig { return ClusterConfig{ - Name: "test-k3d-cluster", + Name: "grove-e2e-cluster", ControlPlaneNodes: 1, - WorkerNodes: 2, - Image: "rancher/k3s:v1.28.8-k3s1", + WorkerNodes: 30, // Maximum needed across all tests + WorkerMemory: "150m", // 150m memory per agent node to fit one workload pod + Image: "rancher/k3s:v1.33.5-k3s1", HostPort: "6550", LoadBalancerPort: "8080:80", - WorkerMemory: "150m", - EnableRegistry: false, + EnableRegistry: true, RegistryPort: "5001", + NodeLabels: []NodeLabel{ + { + Key: "node_role.e2e.grove.nvidia.com", + // k3s refers to worker nodes as agent nodes + Value: "agent", + NodeFilters: []string{"agent:*"}, + }, + // Disable GPU deployment on all nodes (validator causes issues) + { + Key: "nvidia.com/gpu.deploy.operands", + Value: "false", + // k3s refers to worker nodes as agent nodes + NodeFilters: []string{"server:*", "agent:*"}, + }, + }, + WorkerNodeTaints: []NodeTaint{ + { + Key: "node_role.e2e.grove.nvidia.com", + // k3s refers to worker nodes as agent nodes + Value: "agent", + Effect: "NoSchedule", + }, + }, } } @@ -140,6 +166,21 @@ func stripRegistryDomain(img string) string { return img } +// GetKubeconfig fetches and returns the kubeconfig for a k3d cluster. +func GetKubeconfig(ctx context.Context, clusterName string) (*clientcmdapi.Config, error) { + cluster, err := k3dclient.ClusterGet(ctx, runtimes.Docker, &k3d.Cluster{Name: clusterName}) + if err != nil { + return nil, fmt.Errorf("could not get cluster: %w", err) + } + + kubeconfig, err := k3dclient.KubeconfigGet(ctx, runtimes.Docker, cluster) + if err != nil { + return nil, fmt.Errorf("failed to get kubeconfig from k3d: %w", err) + } + + return kubeconfig, nil +} + // ensureClusterDoesNotExist removes any stale k3d cluster with the same name from previous runs. 
func ensureClusterDoesNotExist(ctx context.Context, clusterName string, logger *utils.Logger) error { cluster := &k3d.Cluster{Name: clusterName} @@ -161,29 +202,27 @@ func ensureClusterDoesNotExist(ctx context.Context, clusterName string, logger * } // ensureRegistryDoesNotExist removes any stale k3d registry container from previous runs. -func ensureRegistryDoesNotExist(ctx context.Context, clusterName string, logger *utils.Logger) error { - registryContainerName := fmt.Sprintf("k3d-%s-registry", clusterName) - +// It looks for containers named "registry" (the name used in our k3d config). +// If another container is using the registry port, it returns an error with details. +func ensureRegistryDoesNotExist(ctx context.Context, registryPort string, logger *utils.Logger) error { dockerClient, err := dockerclient.NewClientWithOpts(dockerclient.FromEnv, dockerclient.WithAPIVersionNegotiation()) if err != nil { return fmt.Errorf("failed to create Docker client: %w", err) } defer dockerClient.Close() + // Look for the registry container by name - k3d creates it as just "registry" + // based on the Name field in SimpleConfigRegistryCreateConfig filterArgs := filters.NewArgs() - filterArgs.Add("name", registryContainerName) + filterArgs.Add("name", "registry") containers, err := dockerClient.ContainerList(ctx, container.ListOptions{All: true, Filters: filterArgs}) if err != nil { return fmt.Errorf("failed to list Docker containers: %w", err) } - if len(containers) == 0 { - return nil - } - for _, c := range containers { - displayName := registryContainerName + displayName := "registry" if len(c.Names) > 0 { displayName = strings.TrimPrefix(c.Names[0], "/") } @@ -195,6 +234,25 @@ func ensureRegistryDoesNotExist(ctx context.Context, clusterName string, logger } } + // Check if any other container is using the registry port + allContainers, err := dockerClient.ContainerList(ctx, container.ListOptions{All: true}) + if err != nil { + return fmt.Errorf("failed to list all 
Docker containers: %w", err) + } + + for _, c := range allContainers { + for _, port := range c.Ports { + if fmt.Sprintf("%d", port.PublicPort) == registryPort { + displayName := c.ID[:12] + if len(c.Names) > 0 { + displayName = strings.TrimPrefix(c.Names[0], "/") + } + + return fmt.Errorf("port %s is already in use by container %s (%s). Please stop or remove this container, or use a different registry port with --registry-port", registryPort, displayName, c.ID[:12]) + } + } + } + return nil } @@ -439,15 +497,19 @@ configs: } if cfg.EnableRegistry { - if err := ensureRegistryDoesNotExist(ctx, cfg.Name, logger); err != nil { + if err := ensureRegistryDoesNotExist(ctx, cfg.RegistryPort, logger); err != nil { return nil, nil, err } } // this is the cleanup function, we always return it now so the caller can decide to use it or not + // Note: we create a fresh context here rather than using the passed-in ctx, because cleanup + // may be called after the parent context is cancelled (e.g., after Ctrl+C signal) cleanup := func() { + cleanupCtx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() logger.Debug("🗑️ Deleting cluster...") - if err := k3dclient.ClusterDelete(ctx, runtimes.Docker, &k3dConfig.Cluster, k3d.ClusterDeleteOpts{}); err != nil { + if err := k3dclient.ClusterDelete(cleanupCtx, runtimes.Docker, &k3dConfig.Cluster, k3d.ClusterDeleteOpts{}); err != nil { logger.Errorf("Failed to delete cluster: %v", err) } else { logger.Info("✅ Cluster deleted successfully") diff --git a/operator/e2e/setup/shared_cluster.go b/operator/e2e/setup/shared_cluster.go index 2b8b7495..6f222d37 100644 --- a/operator/e2e/setup/shared_cluster.go +++ b/operator/e2e/setup/shared_cluster.go @@ -113,41 +113,11 @@ func (scm *SharedClusterManager) Setup(ctx context.Context, testImages []string) // Track whether setup completed successfully var setupSuccessful bool - // Configuration for maximum cluster size needed (28 worker nodes + 3 server nodes) - 
customCfg := ClusterConfig{ - Name: "shared-e2e-test-cluster", - ControlPlaneNodes: 1, - WorkerNodes: 30, // Maximum needed across all tests - WorkerMemory: "150m", // 150m memory per agent node to fit one workload pod - Image: "rancher/k3s:v1.33.5-k3s1", - HostPort: "6560", // Use a different port to avoid conflicts - LoadBalancerPort: "8090:80", - EnableRegistry: true, - RegistryPort: "5001", - NodeLabels: []NodeLabel{ - { - Key: "node_role.e2e.grove.nvidia.com", - // k3s refers to worker nodes as agent nodes - Value: "agent", - NodeFilters: []string{"agent:*"}, - }, - // we currently don't want GPUs in e2e tests as validator is causing issues - { - Key: "nvidia.com/gpu.deploy.operands", - Value: "false", - // k3s refers to worker nodes as agent nodes - NodeFilters: []string{"server:*", "agent:*"}, - }, - }, - WorkerNodeTaints: []NodeTaint{ - { - Key: "node_role.e2e.grove.nvidia.com", - // k3s refers to worker nodes as agent nodes - Value: "agent", - Effect: "NoSchedule", - }, - }, - } + // Use the centralized cluster config with overrides for shared test cluster + customCfg := DefaultClusterConfig() + customCfg.Name = "shared-e2e-test-cluster" + customCfg.HostPort = "6560" // Use a different port to avoid conflicts + customCfg.LoadBalancerPort = "8090:80" scm.registryPort = customCfg.RegistryPort @@ -182,7 +152,7 @@ func (scm *SharedClusterManager) Setup(ctx context.Context, testImages []string) scm.dynamicClient = dynamicClient // Setup test images in registry - if err := setupRegistryTestImages(scm.registryPort, testImages); err != nil { + if err := SetupRegistryTestImages(scm.registryPort, testImages); err != nil { return fmt.Errorf("failed to setup registry test images: %w", err) } @@ -495,8 +465,8 @@ func (scm *SharedClusterManager) Teardown() { } } -// setupRegistryTestImages sets up test images in the registry -func setupRegistryTestImages(registryPort string, images []string) error { +// SetupRegistryTestImages pulls images and pushes them to the local 
k3d registry. +func SetupRegistryTestImages(registryPort string, images []string) error { if len(images) == 0 { return nil } diff --git a/operator/go.mod b/operator/go.mod index 03ba96fa..81bb3ba3 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -6,6 +6,7 @@ require ( github.com/NVIDIA/KAI-scheduler v0.12.0 github.com/ai-dynamo/grove/operator/api v0.0.0 github.com/ai-dynamo/grove/scheduler/api v0.0.0 + github.com/alecthomas/kong v1.13.0 github.com/docker/docker v28.3.3+incompatible github.com/go-logr/logr v1.4.3 github.com/go-logr/zapr v1.3.0 diff --git a/operator/go.sum b/operator/go.sum index 03c075fd..ced6a8df 100644 --- a/operator/go.sum +++ b/operator/go.sum @@ -29,6 +29,12 @@ github.com/NVIDIA/KAI-scheduler v0.12.0/go.mod h1:56asdZ2ewWn2ALmPM93zxxGWi+8MX3 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v1.13.0 h1:5e/7XC3ugvhP1DQBmTS+WuHtCbcv44hsohMgcvVxSrA= +github.com/alecthomas/kong v1.13.0/go.mod h1:wrlbXem1CWqUV5Vbmss5ISYhsVPkBb1Yo7YKJghju2I= +github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs= +github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 
h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -235,6 +241,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvH github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=