diff --git a/docs/guides/chat.mdx b/docs/guides/chat.mdx index cd8b60f01..7945215dd 100644 --- a/docs/guides/chat.mdx +++ b/docs/guides/chat.mdx @@ -72,6 +72,11 @@ gaia chat # Show performance metrics gaia chat --stats ``` + +```bash Verbose Output +# Show detailed agent output (thoughts, goals, tool results) +gaia chat --verbose +``` diff --git a/docs/guides/code.mdx b/docs/guides/code.mdx index bb0a05bb9..ed9566baf 100644 --- a/docs/guides/code.mdx +++ b/docs/guides/code.mdx @@ -171,11 +171,18 @@ your-app/ ## Debug and Trace Options + + ```bash + gaia-code "Create a todo tracking app in nextjs" --verbose + ``` + Show detailed agent output (thoughts, goals, plans, tool results) + + ```bash gaia-code "Create a todo tracking app in nextjs" --debug ``` - See internal decision logs + See internal decision logs (implies --verbose) diff --git a/docs/reference/cli.mdx b/docs/reference/cli.mdx index dccd201da..fbf06a791 100644 --- a/docs/reference/cli.mdx +++ b/docs/reference/cli.mdx @@ -1222,9 +1222,14 @@ All commands support these global options: | Option | Type | Default | Description | |--------|------|---------|-------------| +| `--verbose` | flag | off | Show detailed agent output (thoughts, goals, plans, tool results). Default is minimal, clean output. | | `--logging-level` | string | INFO | Logging verbosity [DEBUG, INFO, WARNING, ERROR, CRITICAL] | | `-v, --version` | flag | - | Show program's version and exit | + +By default, GAIA uses a **minimal console** that shows only LLM answers, brief tool status with spinners, and errors. Use `--verbose` to see the full step-by-step agent output including thoughts, goals, plans, and detailed tool results. 
+ + --- ## Troubleshooting diff --git a/docs/sdk/core/agent-system.mdx b/docs/sdk/core/agent-system.mdx index dd2330c26..955bc25a2 100644 --- a/docs/sdk/core/agent-system.mdx +++ b/docs/sdk/core/agent-system.mdx @@ -365,6 +365,7 @@ agent = MyAgent( max_steps=20, # Max reasoning loop iterations streaming=True, # Stream responses token-by-token silent_mode=False, # Suppress console output + verbose=False, # Use full AgentConsole output (default: MinimalConsole) # === Debugging === debug_prompts=False, # Print raw prompts to console @@ -464,7 +465,8 @@ agent = MyAgent( |-----------|-----------|------------|-----------| | `max_steps` | `3` - Quick tasks | `20` - Complex workflows | Speed vs. thoroughness | | `streaming` | `False` - Wait for complete response | `True` - See tokens as generated | Latency vs. responsiveness | -| `silent_mode` | `False` - See all output | `True` - Only get results | Visibility vs. clean output | +| `verbose` | `False` - Clean, minimal output (default) | `True` - Full step-by-step agent output | Clarity vs. visibility | +| `silent_mode` | `False` - See output | `True` - Only get results | Visibility vs. 
programmatic use | --- diff --git a/docs/sdk/core/console.mdx b/docs/sdk/core/console.mdx index e11927572..6c6615bdf 100644 --- a/docs/sdk/core/console.mdx +++ b/docs/sdk/core/console.mdx @@ -8,7 +8,7 @@ icon: "terminal" -**Import:** `from gaia.agents.base.console import AgentConsole, SilentConsole` +**Import:** `from gaia.agents.base.console import MinimalConsole, AgentConsole, SilentConsole` --- @@ -20,23 +20,54 @@ icon: "terminal" --- -## AgentConsole (Rich CLI Output) +## MinimalConsole (Default) + +```python +from gaia.agents.base.console import MinimalConsole + +class MyAgent(Agent): + def _create_console(self): + # Clean, minimal output (default behavior) + return MinimalConsole() + +# Shows: +# - LLM answers (streamed as markdown) +# - Tool calls (one-line with spinner) +# - Brief tool output (key results, not raw JSON) +# - Errors and warnings (inline) +# +# Suppresses: +# - Agent internals (thoughts, goals, plans, step numbers) +# - Full JSON dumps of tool arguments/results +# - Decorative panels and separators +``` + +`MinimalConsole` is the **default** output handler for all agents. It provides a clean, fast user experience by showing only what matters: answers, brief tool status, and errors. + +Use `--verbose` on the CLI (or `verbose=True` in code) to switch to `AgentConsole`. + +--- + +## AgentConsole (Verbose CLI Output) ```python from gaia.agents.base.console import AgentConsole class MyAgent(Agent): def _create_console(self): - # Rich console with syntax highlighting + # Rich console with full step-by-step output return AgentConsole() # Displays: # - Colored step headers -# - Syntax-highlighted tool calls +# - Thoughts, goals, and plans +# - Syntax-highlighted tool calls with full JSON # - Progress indicators -# - Formatted results +# - Formatted results with panels ``` +`AgentConsole` is used when `--verbose` is passed on the CLI or `verbose=True` in the agent constructor. 
It provides maximum visibility into the agent's reasoning process, useful for development and debugging. + --- ## SilentConsole (No Output) @@ -77,7 +108,8 @@ agent = MyAgent(output_handler=handler) | Handler | Use Case | Output | Best For | |---------|----------|--------|----------| -| `AgentConsole` | Interactive CLI | Rich, colored text | Development, debugging | +| `MinimalConsole` | Interactive CLI (default) | Clean, minimal text | End users, production | +| `AgentConsole` | Verbose CLI (`--verbose`) | Rich, colored text | Development, debugging | | `SilentConsole` | Headless operation | None | APIs, tests, background | | `SSEOutputHandler` | Streaming API | Server-Sent Events | Web applications | diff --git a/docs/spec/console.mdx b/docs/spec/console.mdx index cf1d63502..9ae8d9b4f 100644 --- a/docs/spec/console.mdx +++ b/docs/spec/console.mdx @@ -7,9 +7,9 @@ title: "Console Output Handlers" -**Component:** OutputHandler, AgentConsole, SilentConsole, SSEOutputHandler +**Component:** OutputHandler, MinimalConsole, AgentConsole, SilentConsole, SSEOutputHandler **Module:** `gaia.agents.base.console`, `gaia.api.sse_handler` -**Import:** `from gaia.agents.base.console import OutputHandler, AgentConsole, SilentConsole` +**Import:** `from gaia.agents.base.console import OutputHandler, MinimalConsole, AgentConsole, SilentConsole` --- @@ -32,22 +32,25 @@ The console output system provides a unified interface for handling agent output ```mermaid %%{init: {'theme':'base', 'themeVariables': { 'primaryColor':'#ED1C24', 'primaryTextColor':'#fff', 'primaryBorderColor':'#C8171E', 'lineColor':'#F4484D', 'secondaryColor':'#2d2d2d', 'tertiaryColor':'#f5f5f5', 'fontFamily': 'system-ui, -apple-system, sans-serif'}}}%% flowchart TD - A(["OUTPUT HANDLER"]) --> B(["AGENT CONSOLE"]) + A(["OUTPUT HANDLER"]) --> B(["MINIMAL CONSOLE (default)"]) + A --> E(["AGENT CONSOLE (--verbose)"]) A --> C(["SILENT CONSOLE"]) A --> D(["SSE HANDLER"]) style A 
fill:#ED1C24,stroke:#C8171E,stroke-width:2px,color:#fff style B fill:#F4484D,stroke:#ED1C24,stroke-width:2px,color:#fff + style E fill:#F4484D,stroke:#ED1C24,stroke-width:2px,color:#fff style C fill:#F4484D,stroke:#ED1C24,stroke-width:2px,color:#fff style D fill:#F4484D,stroke:#ED1C24,stroke-width:2px,color:#fff - linkStyle 0,1,2 stroke:#ED1C24,stroke-width:2px + linkStyle 0,1,2,3 stroke:#ED1C24,stroke-width:2px ``` | Component | Type | Purpose | |-----------|------|---------| | **OUTPUT HANDLER** | Abstract Base Class | Defines interface for all output handlers | -| **AGENT CONSOLE** | Implementation | Rich CLI output with colors, spinners, syntax highlighting | +| **MINIMAL CONSOLE** | Implementation (default) | Clean, minimal output — answers, brief tool status, errors | +| **AGENT CONSOLE** | Implementation (`--verbose`) | Rich CLI output with colors, spinners, syntax highlighting, full step-by-step detail | | **SILENT CONSOLE** | Implementation | Suppressed output for testing and API mode | | **SSE HANDLER** | Implementation | Server-Sent Events streaming for API clients | diff --git a/src/gaia/agents/base/agent.py b/src/gaia/agents/base/agent.py index 674bc029b..35c7293bc 100644 --- a/src/gaia/agents/base/agent.py +++ b/src/gaia/agents/base/agent.py @@ -16,7 +16,7 @@ import uuid from typing import Any, Dict, List, Optional -from gaia.agents.base.console import AgentConsole, SilentConsole +from gaia.agents.base.console import AgentConsole, MinimalConsole, SilentConsole from gaia.agents.base.errors import format_execution_trace from gaia.agents.base.tools import _TOOL_REGISTRY @@ -76,6 +76,7 @@ def __init__( show_stats: bool = False, silent_mode: bool = False, debug: bool = False, + verbose: bool = False, output_handler=None, max_plan_iterations: int = 3, max_consecutive_repeats: int = 4, @@ -99,7 +100,9 @@ def __init__( show_stats: If True, displays LLM performance stats after each response (default: False) silent_mode: If True, suppresses all console output 
for JSON-only usage (default: False) debug: If True, enables debug output for troubleshooting (default: False) - output_handler: Custom OutputHandler for displaying agent output (default: None, creates console based on silent_mode) + verbose: If True, uses the full AgentConsole with detailed step-by-step output. + If False (default), uses MinimalConsole with clean, minimal output. + output_handler: Custom OutputHandler for displaying agent output (default: None, creates console based on mode flags) max_plan_iterations: Maximum number of plan-execute-replan cycles (default: 3, 0 = unlimited) max_consecutive_repeats: Maximum consecutive identical tool calls before stopping (default: 4) min_context_size: Minimum context size required for this agent (default: 32768). @@ -120,6 +123,7 @@ def __init__( self.show_stats = show_stats self.silent_mode = silent_mode self.debug = debug + self.verbose = verbose self.last_result = None # Store the most recent result self.max_plan_iterations = max_plan_iterations self.max_consecutive_repeats = max_consecutive_repeats @@ -348,7 +352,12 @@ def _get_system_prompt(self) -> str: def _create_console(self): """ Create and return a console output handler. - Returns SilentConsole if in silent_mode, otherwise AgentConsole. + + Priority: + 1. SilentConsole if silent_mode (JSON-only, no output) + 2. AgentConsole if verbose (full step-by-step debug output) + 3. MinimalConsole (default — clean, minimal user experience) + Subclasses can override this to provide domain-specific console output. 
""" if self.silent_mode: @@ -356,7 +365,9 @@ def _create_console(self): # This would be true for JSON-only output or when output_dir is set silence_final_answer = getattr(self, "output_dir", None) is not None return SilentConsole(silence_final_answer=silence_final_answer) - return AgentConsole() + if self.verbose: + return AgentConsole() + return MinimalConsole() @abc.abstractmethod def _register_tools(self): diff --git a/src/gaia/agents/base/console.py b/src/gaia/agents/base/console.py index 76a897caa..ab362af09 100644 --- a/src/gaia/agents/base/console.py +++ b/src/gaia/agents/base/console.py @@ -1257,6 +1257,8 @@ def print_warning(self, warning_message: str): Args: warning_message: Warning message to display """ + if warning_message is None: + warning_message = "Unknown warning" if self.rich_available: self.console.print() # Add newline before self.console.print( @@ -1280,6 +1282,8 @@ def print_streaming_text( end_of_stream: Whether this is the last chunk """ # Accumulate text in the buffer + if text_chunk is None: + text_chunk = "" self.streaming_buffer += text_chunk # Print the chunk directly to console @@ -1971,6 +1975,404 @@ def _generate_file_preview_panel(self, title_prefix: str) -> Panel: ) +class MinimalConsole(OutputHandler): + """ + A clean, minimal console for a fast and snappy user experience. + + Shows: + - LLM answers (streamed as markdown) + - Tool calls (one-line with spinner) + - Brief tool output (key results, not raw JSON) + - Errors and warnings (inline, no panels) + - File operations (one-line status) + + Suppresses: + - Agent internals (thought, goal, plan, step numbers) + - Full JSON dumps of tool arguments/results + - Emoji, decorative panels, separators + + Use --verbose to switch back to the full AgentConsole output. 
+ """ + + # Map tool names to short, human-readable action descriptions + TOOL_ACTIONS = { + # RAG tools + "list_indexed_documents": "Checking indexed documents", + "query_documents": "Searching documents", + "query_specific_file": "Searching file", + "search_indexed_chunks": "Searching chunks", + "index_document": "Indexing document", + "index_directory": "Indexing directory", + "dump_document": "Exporting document", + "summarize_document": "Summarizing document", + "rag_status": "Checking RAG status", + # File tools + "search_file": "Searching for files", + "search_directory": "Searching directories", + "search_file_content": "Searching file content", + "read_file": "Reading file", + "write_file": "Writing file", + "add_watch_directory": "Watching directory", + # Shell tools + "run_shell_command": "Running command", + # Code tools + "generate_code": "Generating code", + "run_python_file": "Running Python", + "run_pytest": "Running tests", + "edit_python_file": "Editing file", + "edit_file": "Editing file", + "format_code": "Formatting code", + "validate_syntax": "Validating syntax", + # Project tools + "create_project": "Creating project", + "list_files": "Listing files", + "install_dependencies": "Installing dependencies", + } + + def __init__(self): + self.streaming_buffer = "" + self.progress = ProgressIndicator() + self._console = Console(highlight=False) if RICH_AVAILABLE else None + self.file_preview_live = None # Compatibility with AgentConsole + + @property + def console(self): + """Expose Rich Console for compatibility with code that accesses self.console.console.""" + return self._console + + # === Core: suppress agent internals === + + def print_processing_start(self, query: str, max_steps: int, model_id: str = None): + """No visible output -- the user already typed the query.""" + + def print_step_header(self, step_num: int, step_limit: int): + """Suppressed.""" + + def print_state_info(self, state_message: str): + """Suppressed.""" + + def 
print_thought(self, thought: str): + """Suppressed.""" + + def print_goal(self, goal: str): + """Suppressed.""" + + def print_plan(self, plan: List[Any], current_step: int = None): + """Suppressed.""" + + def print_step_paused(self, description: str): + """Suppressed.""" + + def print_command_executing(self, command: str): + """Suppressed.""" + + def print_agent_selected(self, agent_name: str, language: str, project_type: str): + """Suppressed.""" + + # === Tool calls: spinner + brief result === + + def print_tool_usage(self, tool_name: str): + """Start a spinner with a short action description.""" + action = self.TOOL_ACTIONS.get(tool_name, tool_name.replace("_", " ").capitalize()) + self.start_progress(action) + + def print_tool_complete(self): + """Stop the spinner.""" + self.stop_progress() + + def pretty_print_json(self, data: Dict[str, Any], title: str = None): + """Show a brief summary of tool results, not raw JSON. + + Extracts the most useful information from tool output and displays + it as a compact one-liner or short block. 
+ """ + if not data or not isinstance(data, dict): + return + + # Extract meaningful summary from common result patterns + summary = self._summarize_tool_result(data) + if summary: + if RICH_AVAILABLE and self._console: + self._console.print(f" [dim]{summary}[/dim]") + else: + print(f" {summary}") + + def _summarize_tool_result(self, data: Dict[str, Any]) -> Optional[str]: + """Extract a brief human-readable summary from tool result data.""" + # Skip tool args (title="Tool Arguments") — only show results + # Results have useful content; args are implementation details + + # Common patterns in tool results: + if "error" in data: + return None # Errors are handled by print_error + + if "results" in data and isinstance(data["results"], list): + count = len(data["results"]) + return f"Found {count} result{'s' if count != 1 else ''}" + + if "chunks" in data and isinstance(data["chunks"], list): + count = len(data["chunks"]) + return f"Found {count} chunk{'s' if count != 1 else ''}" + + if "files" in data and isinstance(data["files"], list): + count = len(data["files"]) + return f"Found {count} file{'s' if count != 1 else ''}" + + if "documents" in data and isinstance(data["documents"], list): + count = len(data["documents"]) + return f"{count} document{'s' if count != 1 else ''} indexed" + + if "output" in data and isinstance(data["output"], str): + output = data["output"].strip() + if output: + # Show first line of command output, truncated + first_line = output.split("\n")[0][:120] + lines = output.count("\n") + 1 + if lines > 1: + return f"{first_line} (+{lines - 1} more lines)" + return first_line + + if "content" in data and isinstance(data["content"], str): + content = data["content"].strip() + if content: + lines = content.count("\n") + 1 + chars = len(content) + return f"{lines} line{'s' if lines != 1 else ''}, {chars} chars" + + if "success" in data: + if data["success"]: + msg = data.get("message", "Done") + return str(msg)[:120] if isinstance(msg, str) else 
"Done" + else: + msg = data.get("message", data.get("error", "Failed")) + return str(msg)[:120] if isinstance(msg, str) else "Failed" + + if "status" in data: + return str(data["status"])[:120] + + return None + + # === Progress === + + def start_progress(self, message: str, show_timer: bool = False): + self.progress.start(message, show_timer=show_timer) + + def stop_progress(self): + self.progress.stop() + + # === Status messages: inline, no panels === + + def print_error(self, error_message: str): + if error_message is None: + error_message = "Unknown error" + self.stop_progress() + if RICH_AVAILABLE and self._console: + self._console.print(f"[bold red]Error:[/bold red] {error_message}") + else: + print(f"Error: {error_message}") + + def print_warning(self, warning_message: str): + if warning_message is None: + warning_message = "Unknown warning" + if RICH_AVAILABLE and self._console: + self._console.print(f"[yellow]Warning:[/yellow] {warning_message}") + else: + print(f"Warning: {warning_message}") + + def print_info(self, message: str): + if RICH_AVAILABLE and self._console: + self._console.print(f"[dim]{message}[/dim]") + else: + print(message) + + def print_success(self, message: str): + if RICH_AVAILABLE and self._console: + self._console.print(f"[green]{message}[/green]") + else: + print(message) + + # === Streaming: pass through directly === + + def print_streaming_text(self, text_chunk: str, end_of_stream: bool = False): + if text_chunk is None: + text_chunk = "" + self.streaming_buffer += text_chunk + print(text_chunk, end="", flush=True) + if end_of_stream: + print() + + def get_streaming_buffer(self) -> str: + result = self.streaming_buffer + self.streaming_buffer = "" + return result + + # === Final answer: rendered markdown, no box === + + def print_final_answer(self, answer: str, streaming: bool = True): + if RICH_AVAILABLE and self._console: + from rich.markdown import Markdown as RichMarkdown + + self._console.print() + 
self._console.print(RichMarkdown(answer)) + self._console.print() + else: + print(f"\n{answer}\n") + + def print_repeated_tool_warning(self): + self.print_warning("Agent is repeating the same action. Stopping.") + + def print_completion(self, steps_taken: int, steps_limit: int): + if steps_taken >= steps_limit: + self.print_warning(f"Stopped after {steps_taken}/{steps_limit} steps") + + # === Stats: show if explicitly requested === + + def display_stats(self, stats: Dict[str, Any]): + if not stats: + return + if RICH_AVAILABLE: + from rich.table import Table + + table = Table(show_header=True, header_style="bold") + table.add_column("Metric", style="dim") + table.add_column("Value", justify="right") + for key, value in stats.items(): + if value is not None: + if isinstance(value, float): + table.add_row(key, f"{value:.2f}") + else: + table.add_row(key, str(value)) + if self._console: + self._console.print(table) + else: + for key, value in stats.items(): + if value is not None: + print(f" {key}: {value}") + + # === File operations: brief one-liners === + + def print_file_created(self, filename: str, size: int = 0, extension: str = ""): + self.print_info(f"Created {filename}") + + def print_file_modified(self, filename: str): + self.print_info(f"Modified {filename}") + + def print_file_deleted(self, filename: str): + self.print_info(f"Deleted {filename}") + + def print_file_moved(self, src_filename: str, dest_filename: str): + self.print_info(f"Moved {src_filename} -> {dest_filename}") + + # === Model status === + + def print_model_loading(self, model_name: str): + self.start_progress(f"Loading {model_name}") + + def print_model_ready(self, model_name: str, already_loaded: bool = False): + self.stop_progress() + + # === File preview (CodeAgent): suppressed in minimal mode === + + def start_file_preview(self, filename: str, max_lines: int = 15, title_prefix: str = ""): + """Suppressed -- file previews are verbose debug output.""" + + def update_file_preview(self, 
content_chunk: str): + """Suppressed.""" + + def stop_file_preview(self): + """Suppressed.""" + + def print_diff(self, diff: str, filename: str = ""): + """Show diff filename only, not the full diff content.""" + if filename: + self.print_info(f"Changed {filename}") + + # === Remaining methods === + + def print_extraction_start(self, image_num: int, page_num: int, mime_type: str): + """Suppressed.""" + + def print_extraction_complete(self, chars: int, image_num: int, elapsed_seconds: float, size_kb: float): + """Suppressed.""" + + def print_ready_for_input(self): + """Suppressed.""" + + def print_processing_step(self, step_num: int, total_steps: int, step_name: str, status: str = "running"): + """Suppressed.""" + + def print_processing_pipeline_start(self, filename: str, total_steps: int): + self.start_progress(f"Processing {filename}") + + def print_processing_pipeline_complete(self, filename: str, success: bool, elapsed_seconds: float, patient_name: str = None, is_duplicate: bool = False): + self.stop_progress() + if success: + self.print_info(f"Processed {filename} ({elapsed_seconds:.1f}s)") + else: + self.print_warning(f"Failed to process {filename}") + + def print_checklist(self, items: List[Any], current_idx: int): + """Suppressed.""" + + def print_checklist_reasoning(self, reasoning: str): + """Suppressed.""" + + # === Download methods === + + def print_download_start(self, model_name: str) -> None: + """Show download starting.""" + self.print_info(f"Downloading {model_name}...") + + def print_download_progress( + self, + percent: int, + bytes_downloaded: int, + bytes_total: int, + speed_mbps: float = 0.0, + ) -> None: + """Show download progress inline.""" + import sys as _sys + + if bytes_total > 1024**3: + dl_str = f"{bytes_downloaded / 1024**3:.2f} GB" + total_str = f"{bytes_total / 1024**3:.2f} GB" + elif bytes_total > 1024**2: + dl_str = f"{bytes_downloaded / 1024**2:.0f} MB" + total_str = f"{bytes_total / 1024**2:.0f} MB" + else: + dl_str = 
f"{bytes_downloaded / 1024:.0f} KB" + total_str = f"{bytes_total / 1024:.0f} KB" + + progress_line = f" {percent:3d}% {dl_str} / {total_str}" + if speed_mbps > 0.1: + progress_line += f" @ {speed_mbps:.0f} MB/s" + _sys.stdout.write(f"\r{progress_line:<60}") + _sys.stdout.flush() + + def print_download_complete(self, model_name: str = None) -> None: + """Show download complete.""" + print() # Newline after progress bar + msg = f"Downloaded {model_name}" if model_name else "Download complete" + self.print_info(msg) + + def print_download_error(self, error_message: str, model_name: str = None) -> None: + """Show download error.""" + print() # Newline after progress bar + msg = f"Download failed for {model_name}: {error_message}" if model_name else f"Download failed: {error_message}" + self.print_error(msg) + + def print_download_skipped(self, model_name: str, reason: str = "already downloaded") -> None: + """Show download skipped.""" + self.print_info(f"{model_name} ({reason})") + + def print(self, *args, **kwargs): + """Direct print pass-through.""" + if RICH_AVAILABLE and self._console: + self._console.print(*args, **kwargs) + else: + print(*args, **kwargs) + + class SilentConsole(OutputHandler): """ A silent console that suppresses all output for JSON-only mode. 
@@ -2141,7 +2543,7 @@ def print_success(self, message: str): def print_file_created(self, filename: str, size: int = 0, extension: str = ""): """No-op implementation.""" - def print_file_modified(self, filename: str, size: int = 0): + def print_file_modified(self, filename: str): """No-op implementation.""" def print_file_deleted(self, filename: str): diff --git a/src/gaia/agents/blender/agent.py b/src/gaia/agents/blender/agent.py index b0e07326c..7bdf8e178 100644 --- a/src/gaia/agents/blender/agent.py +++ b/src/gaia/agents/blender/agent.py @@ -8,7 +8,6 @@ from typing import Any, Dict, Optional from gaia.agents.base.agent import Agent -from gaia.agents.base.console import AgentConsole from gaia.agents.base.tools import tool from gaia.agents.blender.core.scene import generate_scene_diagnosis_code from gaia.mcp.blender_mcp_client import MCPClient @@ -34,6 +33,9 @@ def __init__( output_dir: str = None, streaming: bool = False, show_stats: bool = True, + verbose: bool = False, + silent_mode: bool = False, + debug: bool = False, ): """ Initialize the BlenderAgent with MCP client and LLM client. 
@@ -47,6 +49,9 @@ def __init__( output_dir: Directory for storing JSON output files (default: current directory) streaming: If True, enables real-time streaming of LLM responses (default: False) show_stats: If True, displays LLM performance stats after each response (default: True) + verbose: If True, uses full AgentConsole output (default: False) + silent_mode: If True, suppresses all console output (default: False) + debug: If True, enables debug output (default: False) """ # Initialize the MCP client for Blender communication self.mcp = mcp if mcp else MCPClient() @@ -60,20 +65,14 @@ def __init__( output_dir=output_dir, streaming=streaming, show_stats=show_stats, + verbose=verbose, + silent_mode=silent_mode, + debug=debug, ) # Register Blender-specific tools self._register_tools() - def _create_console(self) -> AgentConsole: - """ - Create and return a Agent-specific console output handler. - - Returns: - A AgentConsole instance - """ - return AgentConsole() - def _get_system_prompt(self) -> str: """Generate the system prompt for the Blender agent.""" # Get formatted tools from registry diff --git a/src/gaia/agents/chat/agent.py b/src/gaia/agents/chat/agent.py index f0a659e97..e561b5cef 100644 --- a/src/gaia/agents/chat/agent.py +++ b/src/gaia/agents/chat/agent.py @@ -48,6 +48,7 @@ class ChatAgentConfig: show_prompts: bool = False show_stats: bool = False silent_mode: bool = False + verbose: bool = False # Use --verbose for full AgentConsole output output_dir: Optional[str] = None # RAG settings @@ -169,6 +170,7 @@ def __init__(self, config: Optional[ChatAgentConfig] = None): show_stats=config.show_stats, silent_mode=config.silent_mode, debug=config.debug, + verbose=config.verbose, ) # Index initial documents (only if RAG is available) @@ -364,14 +366,19 @@ def _get_system_prompt(self) -> str: return prompt def _create_console(self): - """Create console for chat agent.""" - from gaia.agents.base.console import SilentConsole + """Create console for chat agent. 
+ + Overrides base to ensure final answer is always shown even in silent mode. + """ + from gaia.agents.base.console import MinimalConsole, SilentConsole if self.silent_mode: # For chat agent, we ALWAYS want to show the final answer # Even in silent mode, the user needs to see the response return SilentConsole(silence_final_answer=False) - return AgentConsole() + if self.verbose: + return AgentConsole() + return MinimalConsole() def _generate_search_keys(self, query: str) -> List[str]: """ diff --git a/src/gaia/agents/code/agent.py b/src/gaia/agents/code/agent.py index 2609a41aa..ca9361652 100644 --- a/src/gaia/agents/code/agent.py +++ b/src/gaia/agents/code/agent.py @@ -172,16 +172,6 @@ def _get_system_prompt(self, _user_input: Optional[str] = None) -> str: """ return get_system_prompt(language=self.language, project_type=self.project_type) - def _create_console(self): - """Create console for Code agent output. - - Returns: - AgentConsole or SilentConsole: Console instance - """ - if self.silent_mode: - return SilentConsole() - return AgentConsole() - def _register_tools(self) -> None: """Register Code-specific tools from mixins.""" # Register all tools from consolidated mixins diff --git a/src/gaia/agents/code/cli.py b/src/gaia/agents/code/cli.py index 4acab411b..af4957c7b 100644 --- a/src/gaia/agents/code/cli.py +++ b/src/gaia/agents/code/cli.py @@ -144,9 +144,11 @@ def cmd_run(args): # Use RoutingAgent to determine language and project type if query: # Prepare agent configuration from CLI args + verbose_mode = getattr(args, "verbose", False) or args.debug agent_config = { "silent_mode": args.silent, "debug": args.debug, + "verbose": verbose_mode, "show_prompts": args.show_prompts, "max_steps": args.max_steps, "use_claude": args.use_claude, @@ -162,9 +164,11 @@ def cmd_run(args): else: # Interactive mode - start with default Python agent # User can still benefit from routing per query + verbose_mode = getattr(args, "verbose", False) or args.debug agent = 
CodeAgent( silent_mode=args.silent, debug=args.debug, + verbose=verbose_mode, show_prompts=args.show_prompts, max_steps=args.max_steps, use_claude=args.use_claude, @@ -288,6 +292,11 @@ def main(): action="store_true", help="Enable debug logging", ) + parser.add_argument( + "--verbose", + action="store_true", + help="Show detailed agent output (thoughts, goals, plans, tool results). Default is minimal output.", + ) parser.add_argument( "--silent", "-s", diff --git a/src/gaia/agents/docker/agent.py b/src/gaia/agents/docker/agent.py index 80096db51..06950dfb1 100644 --- a/src/gaia/agents/docker/agent.py +++ b/src/gaia/agents/docker/agent.py @@ -150,12 +150,6 @@ def _get_system_prompt(self) -> str: - Include proper copyright header in Dockerfile - Use save_dockerfile to write the Dockerfile you generated""" - def _create_console(self): - """Create console for Docker agent output.""" - if self.silent_mode: - return SilentConsole() - return AgentConsole() - def _register_tools(self): """Register Docker-specific tools.""" diff --git a/src/gaia/agents/jira/agent.py b/src/gaia/agents/jira/agent.py index abb160bf0..ea9e2a870 100644 --- a/src/gaia/agents/jira/agent.py +++ b/src/gaia/agents/jira/agent.py @@ -238,16 +238,6 @@ def _get_system_prompt(self) -> str: return prompt - def _create_console(self): - """Create console for Jira agent output. - - Returns: - AgentConsole or SilentConsole: Console instance based on silent_mode setting - """ - if self.silent_mode: - return SilentConsole() - return AgentConsole() - def initialize(self) -> Dict[str, Any]: """ Discover and cache Jira instance configuration. 
diff --git a/src/gaia/apps/docker/app.py b/src/gaia/apps/docker/app.py index 91f2b6bc4..be1a8c194 100644 --- a/src/gaia/apps/docker/app.py +++ b/src/gaia/apps/docker/app.py @@ -69,6 +69,7 @@ def __init__( show_stats=self.debug or self.verbose, silent_mode=False, # Always show agent steps for compelling demos debug=self.debug, + verbose=self.verbose, ) # Configure logging based on debug flag diff --git a/src/gaia/apps/jira/app.py b/src/gaia/apps/jira/app.py index 49059ad2e..906da4735 100644 --- a/src/gaia/apps/jira/app.py +++ b/src/gaia/apps/jira/app.py @@ -68,6 +68,7 @@ def __init__( show_stats=self.debug or self.verbose, silent_mode=False, # Always show agent steps for compelling demos debug=self.debug, + verbose=self.verbose, ) # Configure logging based on debug flag diff --git a/src/gaia/cli.py b/src/gaia/cli.py index ec8ca661f..2d6a41770 100644 --- a/src/gaia/cli.py +++ b/src/gaia/cli.py @@ -514,6 +514,9 @@ async def async_main(action, **kwargs): "stream", "no_lemonade_check", "list_tools", + "verbose", + "trace", + "max_steps", } excluded_params = cli_params | audio_params | llm_provider_params client_params = {k: v for k, v in kwargs.items() if k not in excluded_params} @@ -538,11 +541,9 @@ async def async_main(action, **kwargs): from gaia.agents.chat.app import interactive_mode try: - # Use silent mode when debug is off to hide intermediate processing - # SilentConsole will still stream the final answer query = kwargs.get("query") debug_mode = kwargs.get("debug", False) - use_silent_mode = not debug_mode # Hide processing steps unless debugging + verbose_mode = kwargs.get("verbose", False) or debug_mode # Create configuration with CLI values config = ChatAgentConfig( @@ -558,7 +559,7 @@ async def async_main(action, **kwargs): streaming=kwargs.get("stream", False), show_prompts=kwargs.get("show_prompts", False), show_stats=kwargs.get("show_stats", False), - silent_mode=use_silent_mode, + verbose=verbose_mode, debug=debug_mode, 
rag_documents=kwargs.get("index", []), watch_directories=kwargs.get("watch", []), @@ -750,6 +751,11 @@ def main(): action="store_true", help="Skip Lemonade server check (for CI/testing without Lemonade)", ) + parent_parser.add_argument( + "--verbose", + action="store_true", + help="Show detailed agent output (thoughts, goals, plans, tool results). Default is minimal output.", + ) # Create subparsers for different commands subparsers = parser.add_subparsers(dest="action", help="Action to perform") @@ -933,9 +939,6 @@ def main(): action="store_true", help="Minimal output, suppress progress indicators", ) - summarize_parser.add_argument( - "--verbose", action="store_true", help="Detailed output with debug information" - ) summarize_parser.add_argument( "--combined-prompt", action="store_true", @@ -1079,12 +1082,6 @@ def main(): default=8765, help="MCP bridge port (default: 8765)", ) - jira_parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output", - ) jira_parser.add_argument( "-d", "--debug", @@ -1108,12 +1105,6 @@ def main(): default=".", help="Directory to analyze/containerize (default: current directory)", ) - docker_parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output", - ) docker_parser.add_argument( "--debug", action="store_true", @@ -2014,11 +2005,6 @@ def main(): default="gaia.mcp.log", help="Log file path for background mode (default: gaia.mcp.log)", ) - mcp_start_parser.add_argument( - "--verbose", - action="store_true", - help="Enable verbose logging for all HTTP requests", - ) mcp_start_parser.add_argument( "--ctx-size", type=int, @@ -4730,8 +4716,6 @@ def handle_jira_command(args): # Pass the arguments directly to the Jira app # The app expects certain arguments, so we need to ensure they're set - if not hasattr(args, "verbose"): - args.verbose = False if not hasattr(args, "debug"): args.debug = False if not hasattr(args, "model"): @@ -4779,8 +4763,6 @@ def 
handle_docker_command(args): # Pass the arguments directly to the Docker app # The app expects certain arguments, so we need to ensure they're set - if not hasattr(args, "verbose"): - args.verbose = False if not hasattr(args, "debug"): args.debug = False if not hasattr(args, "model"): @@ -5339,6 +5321,8 @@ def handle_blender_command(args): base_url = getattr(args, "base_url", None) # Create the BlenderAgent + debug_mode = getattr(args, "debug", False) + verbose_mode = getattr(args, "verbose", False) or debug_mode agent = BlenderAgent( mcp=mcp_client, model_id=args.model, @@ -5348,6 +5332,8 @@ def handle_blender_command(args): streaming=args.stream, show_stats=args.show_stats, debug_prompts=args.debug_prompts, + verbose=verbose_mode, + debug=debug_mode, ) # Run in interactive mode if specified