@@ -18,108 +18,22 @@ import (
 func NewAssistCommand() *cobra.Command {
 	var command = &cobra.Command{
 		Use:   "assist",
-		Short: "Assist-AI related utilities",
-		Long: `Assist-AI related utilities for container runtime environments
+		Short: "AI-powered chaos engineering assistance",
+		Long: `AI-powered chaos engineering assistance for krknctl
 
 Available Commands:
-  check    Check GPU support in container runtime
   run      Run AI-powered chaos engineering assistance
 
-GPU Auto-Detection:
-  Lightspeed automatically detects your GPU type! No manual flags needed.
-
-Supported GPU Types:
-  • Apple Silicon (M1, M2, M3, M4 with Metal)
-  • NVIDIA GPUs (CUDA, GeForce, Quadro, Tesla)
-
-Future support planned:
-  • AMD GPUs (Radeon, FirePro, Instinct with ROCm)
-  • Intel GPUs (Arc, Iris, UHD Graphics)
+The assist service uses a lightweight AI model with FAISS vector search
+to provide intelligent command suggestions and documentation search.
 
 Examples:
-  krknctl assist check
-  krknctl assist run
-  krknctl assist run --no-gpu`,
+  krknctl assist run`,
 	}
 
-	// GPU auto-detection - no manual flags needed anymore!
-
-	// Add no-gpu flag for CPU-only mode
-	command.PersistentFlags().Bool("no-gpu", false, "Use CPU-only mode (no GPU acceleration)")
-
 	return command
 }
 
-// GPU auto-detection - no manual flag parsing needed anymore!
-
-func NewAssistCheckCommand(
-	providerFactory *factory.ProviderFactory,
-	scenarioOrchestrator *scenarioorchestrator.ScenarioOrchestrator,
-	config config.Config,
-) *cobra.Command {
-	var command = &cobra.Command{
-		Use:   "check",
-		Short: "Check GPU support in container runtime",
-		Long: `Check whether the container runtime (Podman or Docker) has GPU support available
-
-Lightspeed automatically tests all supported GPU types to detect your hardware.
-
-Examples:
-  krknctl assist check`,
-		Args: cobra.NoArgs,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			// Print container runtime info
-			(*scenarioOrchestrator).PrintContainerRuntime()
-
-			// Check if Docker is being used - Lightspeed only supports Podman
-			if (*scenarioOrchestrator).GetContainerRuntime() == orchestratormodels.Docker {
-				return fmt.Errorf("❌ Assist requires Podman container runtime. " +
-					"Docker is not supported for GPU acceleration")
-			}
-
-			// Get container runtime socket
-			socket, err := (*scenarioOrchestrator).GetContainerRuntimeSocket(nil)
-			if err != nil {
-				return fmt.Errorf("failed to get container runtime socket: %w", err)
-			}
-
-			// Connect to container runtime
-			ctx, err := (*scenarioOrchestrator).Connect(*socket)
-			if err != nil {
-				return fmt.Errorf("failed to connect to container runtime: %w", err)
-			}
-
-			// Get no-gpu flag
-			noGPU, _ := cmd.Flags().GetBool("no-gpu")
-
-			// Create platform GPU detector
-			detector := assist.NewPlatformGPUDetector(config)
-
-			// Auto-detect GPU acceleration
-			fmt.Println("\n🔍 Detecting GPU acceleration...")
-			gpuType := detector.DetectGPUAcceleration(ctx, noGPU)
-
-			// Get configuration
-			imageURI, _, deviceMounts, err := detector.AutoSelectAssistConfig(ctx, noGPU)
-			if err != nil {
-				return fmt.Errorf("failed to get assist configuration: %w", err)
-			}
-
-			// Format and print result
-			fmt.Printf("\n✅ GPU acceleration: %s\n", detector.GetGPUDescription(gpuType))
-			fmt.Printf("📦 Container image: %s\n", imageURI)
-			if len(deviceMounts) > 0 {
-				fmt.Printf("🔗 Device mounts: %v\n", deviceMounts)
-			} else {
-				fmt.Printf("🔗 Device mounts: none (CPU-only)\n")
-			}
-
-			return nil
-		},
-	}
-
-	return command
-}
 
 // buildAssistRegistryFromFlags builds assist registry configuration from command flags
 func buildAssistRegistryFromFlags(cmd *cobra.Command, config config.Config) (*models.RegistryV2, error) {
@@ -167,31 +81,24 @@ func NewAssistRunCommand(
 		Short: "Run AI-powered chaos engineering assistance",
 		Long: `Run AI-powered chaos engineering assistance with Retrieval-Augmented Generation (RAG)
 
-This command automatically detects your GPU and deploys a lightweight AI model that can answer
-questions about krknctl usage, chaos engineering scenarios, and provide intelligent command
-suggestions based on natural language.
+This command deploys a lightweight AI model that can answer questions about krknctl usage,
+chaos engineering scenarios, and provide intelligent command suggestions based on natural language.
 
 The system uses:
-- Automatic GPU detection (Apple Silicon, NVIDIA)
-- GPU-accelerated inference for fast responses
+- FAISS vector search for fast document retrieval
 - Live documentation indexing
-- Llama 3.2:1B model optimized for chaos engineering domain
+- Llama model optimized for chaos engineering domain
 
 Examples:
-  krknctl assist run           # Auto-detect GPU
-  krknctl assist run --no-gpu  # Force CPU-only mode`,
+  krknctl assist run`,
 		Args: cobra.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
-			// Get flags
-			noGPU, _ := cmd.Flags().GetBool("no-gpu")
-
 			// Print container runtime info
 			(*scenarioOrchestrator).PrintContainerRuntime()
 
-			// Check if Docker is being used - Lightspeed only supports Podman
+			// Check if Docker is being used - assist requires Podman
 			if (*scenarioOrchestrator).GetContainerRuntime() == orchestratormodels.Docker {
-				return fmt.Errorf("❌ assist requires Podman container runtime. " +
-					"Docker is not supported for GPU acceleration")
+				return fmt.Errorf("❌ assist requires Podman container runtime")
 			}
 
 			// Get container runtime socket
@@ -212,30 +119,19 @@ Examples:
 				return fmt.Errorf("failed to build assist registry configuration: %w", err)
 			}
 
-			// Step 1: Auto-detect GPU acceleration
-			fmt.Println("🔍 detecting GPU acceleration...")
-
-			// Create platform GPU detector
-			detector := assist.NewPlatformGPUDetector(config)
-			gpuType := detector.DetectGPUAcceleration(ctx, noGPU)
-
-			fmt.Printf("✅ GPU acceleration: %s\n", detector.GetGPUDescription(gpuType))
-
-			// Step 2: Deploy RAG model container
-			fmt.Println("\n🚀 deploying assist model...")
+			// Deploy RAG model container
+			fmt.Println("🚀 deploying assist model...")
 
 			// Create spinners for the operations
 			pullSpinner := NewSpinnerWithSuffix(" pulling RAG model image...")
 			thinkingSpinner := NewSpinnerWithSuffix(" thinking...", 37)
 
-			ragResult, err := assist.DeployAssistModelWithGPUType(ctx, gpuType, *scenarioOrchestrator, config, registry, detector, pullSpinner)
+			ragResult, err := assist.DeployAssistModel(ctx, *scenarioOrchestrator, config, registry, pullSpinner)
 			if err != nil {
-				// Handle GPU-related errors with helpful suggestions
-				enhancedErr := detector.HandleContainerError(err, gpuType)
-				return fmt.Errorf("failed to deploy RAG model: %w", enhancedErr)
+				return fmt.Errorf("failed to deploy RAG model: %w", err)
 			}
 
-			// Step 3: Health check
+			// Health check
 			fmt.Println("\n🩺 performing health check...")
 
 			healthOK, err := assist.PerformAssistHealthCheck(ragResult.ContainerID, ragResult.HostPort, *scenarioOrchestrator, ctx, config)
@@ -249,7 +145,7 @@ Examples:
 
 			fmt.Println("✅ assist service is ready!")
 
-			// Step 4: Start interactive prompt
+			// Start interactive prompt
 			fmt.Printf("\n🚂 starting interactive assist service on port %s...\n",
 				ragResult.HostPort)
 			fmt.Println("type your chaos engineering questions and get intelligent krknctl" +
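
For context, here is a minimal, self-contained sketch of the command tree this change converges on: a bare "assist" parent with only a "run" subcommand and no --no-gpu flag. The main wiring, the lowercase constructor name, and the placeholder RunE body are illustrative assumptions, not code from this repository.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newAssistCommand mirrors the simplified parent command above:
// the check subcommand and the --no-gpu flag are gone.
func newAssistCommand() *cobra.Command {
	assist := &cobra.Command{
		Use:   "assist",
		Short: "AI-powered chaos engineering assistance",
	}
	assist.AddCommand(&cobra.Command{
		Use:   "run",
		Short: "Run AI-powered chaos engineering assistance",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Stand-in for the deploy / health-check / interactive-prompt flow shown in the diff.
			fmt.Println("🚀 deploying assist model...")
			return nil
		},
	})
	return assist
}

func main() {
	root := &cobra.Command{Use: "krknctl"}
	root.AddCommand(newAssistCommand())
	_ = root.Execute()
}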