From e639bd629ef5ebc260459287a863566453d07317 Mon Sep 17 00:00:00 2001 From: ZaviQ7 Date: Tue, 10 Mar 2026 18:10:27 -0400 Subject: [PATCH] feat: Add custom exceptions and enhanced config validation --- backend/app/config.py | 391 +++++++++++++++++-- backend/app/utils/exceptions.py | 499 +++++++++++++++++++++++++ backend/tests/__init__.py | 0 backend/tests/test_config_validated.py | 332 ++++++++++++++++ backend/tests/test_exceptions.py | 323 ++++++++++++++++ 5 files changed, 1515 insertions(+), 30 deletions(-) create mode 100644 backend/app/utils/exceptions.py create mode 100644 backend/tests/__init__.py create mode 100644 backend/tests/test_config_validated.py create mode 100644 backend/tests/test_exceptions.py diff --git a/backend/app/config.py b/backend/app/config.py index 953dfa50..d795302f 100644 --- a/backend/app/config.py +++ b/backend/app/config.py @@ -1,75 +1,406 @@ """ -配置管理 -统一从项目根目录的 .env 文件加载配置 +配置管理(增强验证版) +Enhanced Configuration Management with Comprehensive Validation + +此模块扩展了原有的 config.py,添加: +1. 全面的启动时配置验证 +2. 配置值类型和范围检查 +3. URL 格式验证 +4. 目录权限检查 +5. 
向后兼容的接口 + +用法: + # 向后兼容方式(原有代码无需修改) + errors = Config.validate() + + # 新增:全面验证 + result = Config.validate_comprehensive() + if not result.is_valid: + print(result.errors) """ import os +import re +import socket +from typing import List, Optional, Dict, Any +from urllib.parse import urlparse +from pathlib import Path from dotenv import load_dotenv +from dataclasses import dataclass, field # 加载项目根目录的 .env 文件 -# 路径: MiroFish/.env (相对于 backend/app/config.py) project_root_env = os.path.join(os.path.dirname(__file__), '../../.env') if os.path.exists(project_root_env): load_dotenv(project_root_env, override=True) else: - # 如果根目录没有 .env,尝试加载环境变量(用于生产环境) load_dotenv(override=True) +@dataclass +class ConfigValidationResult: + """ + 配置验证结果 + + 用于收集和报告配置验证过程中的所有问题 + """ + errors: List[str] = field(default_factory=list) + warnings: List[str] = field(default_factory=list) + info: List[str] = field(default_factory=list) + + @property + def is_valid(self) -> bool: + """配置是否有效(无错误)""" + return len(self.errors) == 0 + + def add_error(self, message: str) -> None: + """添加错误(阻止启动)""" + self.errors.append(message) + + def add_warning(self, message: str) -> None: + """添加警告(可以启动,但功能可能受限)""" + self.warnings.append(message) + + def add_info(self, message: str) -> None: + """添加信息""" + self.info.append(message) + + def to_dict(self) -> Dict[str, Any]: + """转换为字典""" + return { + "is_valid": self.is_valid, + "errors": self.errors, + "warnings": self.warnings, + "info": self.info, + "error_count": len(self.errors), + "warning_count": len(self.warnings), + } + + class Config: - """Flask配置类""" + """ + Flask 配置类(增强验证版) + + 保持与原有 config.py 完全兼容,同时添加全面验证功能 + """ - # Flask配置 + # ============================================================ + # Flask 基础配置 + # ============================================================ SECRET_KEY = os.environ.get('SECRET_KEY', 'mirofish-secret-key') DEBUG = os.environ.get('FLASK_DEBUG', 'True').lower() == 'true' - - # JSON配置 - 禁用ASCII转义,让中文直接显示(而不是 \uXXXX 格式) 
JSON_AS_ASCII = False - # LLM配置(统一使用OpenAI格式) + # ============================================================ + # LLM 配置 + # ============================================================ LLM_API_KEY = os.environ.get('LLM_API_KEY') LLM_BASE_URL = os.environ.get('LLM_BASE_URL', 'https://api.openai.com/v1') LLM_MODEL_NAME = os.environ.get('LLM_MODEL_NAME', 'gpt-4o-mini') - # Zep配置 + # LLM 加速配置(可选) + LLM_BOOST_API_KEY = os.environ.get('LLM_BOOST_API_KEY') + LLM_BOOST_BASE_URL = os.environ.get('LLM_BOOST_BASE_URL') + LLM_BOOST_MODEL_NAME = os.environ.get('LLM_BOOST_MODEL_NAME') + + # ============================================================ + # Zep 配置 + # ============================================================ ZEP_API_KEY = os.environ.get('ZEP_API_KEY') + # ============================================================ # 文件上传配置 + # ============================================================ MAX_CONTENT_LENGTH = 50 * 1024 * 1024 # 50MB UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), '../uploads') ALLOWED_EXTENSIONS = {'pdf', 'md', 'txt', 'markdown'} + # ============================================================ # 文本处理配置 - DEFAULT_CHUNK_SIZE = 500 # 默认切块大小 - DEFAULT_CHUNK_OVERLAP = 50 # 默认重叠大小 + # ============================================================ + DEFAULT_CHUNK_SIZE = 500 + DEFAULT_CHUNK_OVERLAP = 50 - # OASIS模拟配置 + # ============================================================ + # OASIS 模拟配置 + # ============================================================ OASIS_DEFAULT_MAX_ROUNDS = int(os.environ.get('OASIS_DEFAULT_MAX_ROUNDS', '10')) OASIS_SIMULATION_DATA_DIR = os.path.join(os.path.dirname(__file__), '../uploads/simulations') - # OASIS平台可用动作配置 - OASIS_TWITTER_ACTIONS = [ - 'CREATE_POST', 'LIKE_POST', 'REPOST', 'FOLLOW', 'DO_NOTHING', 'QUOTE_POST' - ] - OASIS_REDDIT_ACTIONS = [ - 'LIKE_POST', 'DISLIKE_POST', 'CREATE_POST', 'CREATE_COMMENT', - 'LIKE_COMMENT', 'DISLIKE_COMMENT', 'SEARCH_POSTS', 'SEARCH_USER', - 'TREND', 'REFRESH', 'DO_NOTHING', 
'FOLLOW', 'MUTE' - ] - - # Report Agent配置 + # ============================================================ + # Report Agent 配置 + # ============================================================ REPORT_AGENT_MAX_TOOL_CALLS = int(os.environ.get('REPORT_AGENT_MAX_TOOL_CALLS', '5')) REPORT_AGENT_MAX_REFLECTION_ROUNDS = int(os.environ.get('REPORT_AGENT_MAX_REFLECTION_ROUNDS', '2')) REPORT_AGENT_TEMPERATURE = float(os.environ.get('REPORT_AGENT_TEMPERATURE', '0.5')) + # ============================================================ + # 原有验证方法(向后兼容) + # ============================================================ + + @classmethod + def validate(cls) -> List[str]: + """ + 验证必要配置(向后兼容) + + Returns: + 错误消息列表(空列表表示配置有效) + """ + result = cls.validate_comprehensive() + return result.errors + + # ============================================================ + # 新增:全面验证方法 + # ============================================================ + + @classmethod + def validate_comprehensive(cls) -> ConfigValidationResult: + """ + 全面配置验证 + + 检查项: + 1. 必需的 API Keys + 2. URL 格式 + 3. 数值范围 + 4. 目录权限 + 5. 配置一致性 + + Returns: + ConfigValidationResult 包含所有验证结果 + """ + result = ConfigValidationResult() + + # 1. 验证 API Keys + cls._validate_api_keys(result) + + # 2. 验证 URL 格式 + cls._validate_urls(result) + + # 3. 验证数值范围 + cls._validate_numeric_ranges(result) + + # 4. 验证目录权限 + cls._validate_directories(result) + + # 5. 
验证配置一致性 + cls._validate_consistency(result) + + return result + @classmethod - def validate(cls): - """验证必要配置""" - errors = [] + def _validate_api_keys(cls, result: ConfigValidationResult) -> None: + """验证 API Keys""" + + # LLM API Key(必需) if not cls.LLM_API_KEY: - errors.append("LLM_API_KEY 未配置") + result.add_error("LLM_API_KEY 未配置 - AI 模型调用将无法工作") + elif cls.LLM_API_KEY == 'your_api_key_here' or cls.LLM_API_KEY == 'your_api_key': + result.add_error("LLM_API_KEY 使用的是示例值,请配置真实的 API Key") + elif len(cls.LLM_API_KEY) < 20: + result.add_warning(f"LLM_API_KEY 长度异常短 ({len(cls.LLM_API_KEY)} 字符),请确认配置正确") + else: + # 显示脱敏后的 key + masked = cls.LLM_API_KEY[:4] + '*' * 8 + cls.LLM_API_KEY[-4:] if len(cls.LLM_API_KEY) > 16 else '****' + result.add_info(f"LLM_API_KEY: {masked}") + + # Zep API Key(必需) if not cls.ZEP_API_KEY: - errors.append("ZEP_API_KEY 未配置") - return errors + result.add_error("ZEP_API_KEY 未配置 - 图谱构建将无法工作") + elif cls.ZEP_API_KEY == 'your_zep_api_key_here' or cls.ZEP_API_KEY == 'your_zep_api_key': + result.add_error("ZEP_API_KEY 使用的是示例值,请配置真实的 API Key") + else: + masked = cls.ZEP_API_KEY[:4] + '*' * 8 + cls.ZEP_API_KEY[-4:] if len(cls.ZEP_API_KEY) > 16 else '****' + result.add_info(f"ZEP_API_KEY: {masked}") + + # 加速 LLM 配置(可选) + if cls.LLM_BOOST_API_KEY: + result.add_info("LLM_BOOST 配置已启用") + if not cls.LLM_BOOST_BASE_URL or not cls.LLM_BOOST_MODEL_NAME: + result.add_warning("LLM_BOOST 部分配置缺失,加速功能可能无法正常工作") + + @classmethod + def _validate_urls(cls, result: ConfigValidationResult) -> None: + """验证 URL 格式""" + + # LLM Base URL + if cls.LLM_BASE_URL: + parsed = urlparse(cls.LLM_BASE_URL) + if not parsed.scheme: + result.add_error(f"LLM_BASE_URL 缺少协议 (http/https): {cls.LLM_BASE_URL}") + elif parsed.scheme not in ('http', 'https'): + result.add_error(f"LLM_BASE_URL 协议无效: {parsed.scheme}") + elif not parsed.netloc: + result.add_error(f"LLM_BASE_URL 缺少主机名: {cls.LLM_BASE_URL}") + else: + result.add_info(f"LLM_BASE_URL: {cls.LLM_BASE_URL}") + + # LLM Boost Base 
URL(如果配置了) + if cls.LLM_BOOST_BASE_URL: + parsed = urlparse(cls.LLM_BOOST_BASE_URL) + if not parsed.scheme or not parsed.netloc: + result.add_error(f"LLM_BOOST_BASE_URL 格式无效: {cls.LLM_BOOST_BASE_URL}") + + @classmethod + def _validate_numeric_ranges(cls, result: ConfigValidationResult) -> None: + """验证数值范围""" + + # OASIS 模拟轮数 + if cls.OASIS_DEFAULT_MAX_ROUNDS <= 0: + result.add_error(f"OASIS_DEFAULT_MAX_ROUNDS 必须大于 0,当前值: {cls.OASIS_DEFAULT_MAX_ROUNDS}") + elif cls.OASIS_DEFAULT_MAX_ROUNDS > 1000: + result.add_warning(f"OASIS_DEFAULT_MAX_ROUNDS 值过大 ({cls.OASIS_DEFAULT_MAX_ROUNDS}),可能导致长时间运行") + else: + result.add_info(f"OASIS_DEFAULT_MAX_ROUNDS: {cls.OASIS_DEFAULT_MAX_ROUNDS}") + + # Report Agent 配置 + if cls.REPORT_AGENT_MAX_TOOL_CALLS <= 0: + result.add_error(f"REPORT_AGENT_MAX_TOOL_CALLS 必须大于 0,当前值: {cls.REPORT_AGENT_MAX_TOOL_CALLS}") + elif cls.REPORT_AGENT_MAX_TOOL_CALLS > 20: + result.add_warning(f"REPORT_AGENT_MAX_TOOL_CALLS 较大 ({cls.REPORT_AGENT_MAX_TOOL_CALLS}),可能增加 API 成本") + + if cls.REPORT_AGENT_TEMPERATURE < 0 or cls.REPORT_AGENT_TEMPERATURE > 2: + result.add_error(f"REPORT_AGENT_TEMPERATURE 必须在 0-2 之间,当前值: {cls.REPORT_AGENT_TEMPERATURE}") + + # 文件大小限制 + max_mb = cls.MAX_CONTENT_LENGTH / (1024 * 1024) + if max_mb > 100: + result.add_warning(f"MAX_CONTENT_LENGTH 较大 ({max_mb:.0f}MB),可能影响服务器性能") + + @classmethod + def _validate_directories(cls, result: ConfigValidationResult) -> None: + """验证目录权限""" + + directories = [ + ("UPLOAD_FOLDER", cls.UPLOAD_FOLDER), + ("OASIS_SIMULATION_DATA_DIR", cls.OASIS_SIMULATION_DATA_DIR), + ] + + for name, path in directories: + path_obj = Path(path) + + # 检查目录是否存在 + if not path_obj.exists(): + try: + path_obj.mkdir(parents=True, exist_ok=True) + result.add_info(f"{name}: 已创建目录 {path}") + except PermissionError: + result.add_error(f"{name}: 无权限创建目录 {path}") + continue + except Exception as e: + result.add_error(f"{name}: 创建目录失败 {path} - {str(e)}") + continue + + # 检查是否是目录 + if not path_obj.is_dir(): + 
result.add_error(f"{name}: {path} 不是目录")
+                continue
+
+            # 检查写权限
+            if not os.access(path, os.W_OK):
+                result.add_error(f"{name}: 目录不可写 {path}")
+            else:
+                result.add_info(f"{name}: {path} (可写)")
+
+    @classmethod
+    def _validate_consistency(cls, result: ConfigValidationResult) -> None:
+        """验证配置一致性"""
+
+        # 检查模型名称格式
+        model_name = cls.LLM_MODEL_NAME
+        if model_name:
+            # 常见模型名称模式
+            common_patterns = [
+                r'^gpt-',      # OpenAI GPT models
+                r'^claude-',   # Anthropic Claude
+                r'^qwen',      # Alibaba Qwen
+                r'^glm-',      # Zhipu GLM
+                r'^deepseek',  # DeepSeek
+                r'^llama',     # LLaMA
+                r'^mistral',   # Mistral
+                r'^[a-z]',     # 一般小写开头
+            ]
+
+            is_valid_pattern = any(re.match(p, model_name.lower()) for p in common_patterns)
+            if not is_valid_pattern:
+                result.add_warning(f"LLM_MODEL_NAME '{model_name}' 不是常见模型名称格式,请确认配置正确")
+            else:
+                result.add_info(f"LLM_MODEL_NAME: {model_name}")
+
+        # 检查 DEBUG 模式
+        if cls.DEBUG:
+            result.add_warning("DEBUG 模式已启用,不建议在生产环境使用")
+
+        # 检查 SECRET_KEY 强度
+        if cls.SECRET_KEY == 'mirofish-secret-key':
+            result.add_warning("使用默认 SECRET_KEY,建议在生产环境更换为随机字符串")
+
+    @classmethod
+    def get_config_summary(cls) -> Dict[str, Any]:
+        """
+        获取配置摘要(用于健康检查接口)
+
+        注意: 不包含敏感信息如完整 API Keys
+        """
+        return {
+            "llm": {
+                "model": cls.LLM_MODEL_NAME,
+                "base_url": cls.LLM_BASE_URL,
+                "boost_enabled": bool(cls.LLM_BOOST_API_KEY),
+            },
+            "zep": {
+                "configured": bool(cls.ZEP_API_KEY),
+            },
+            "simulation": {
+                "default_max_rounds": cls.OASIS_DEFAULT_MAX_ROUNDS,
+            },
+            "report_agent": {
+                "max_tool_calls": cls.REPORT_AGENT_MAX_TOOL_CALLS,
+                "temperature": cls.REPORT_AGENT_TEMPERATURE,
+            },
+            "debug": cls.DEBUG,
+        }
+
+# ============================================================
+# 启动时验证(可选使用)
+# ============================================================
+
+def validate_on_startup() -> bool:
+    """
+    启动时验证配置
+
+    可以在 run.py 中调用:
+        from app.config import validate_on_startup
+        if not validate_on_startup():
+            sys.exit(1)
+
+    Returns:
+        bool: 配置是否有效
+    """
+    result = 
Config.validate_comprehensive() + + print("\n" + "=" * 60) + print("MiroFish Configuration Validation") + print("=" * 60) + + if result.info: + print("\n[INFO]") + for msg in result.info: + print(f" ✓ {msg}") + + if result.warnings: + print("\n[WARNINGS]") + for msg in result.warnings: + print(f" ⚠ {msg}") + + if result.errors: + print("\n[ERRORS]") + for msg in result.errors: + print(f" ✗ {msg}") + print("\n❌ Configuration validation FAILED - Cannot start server") + print(" Please fix the errors above and try again.\n") + return False + + print("\n✓ Configuration validation PASSED") + print("=" * 60 + "\n") + return True diff --git a/backend/app/utils/exceptions.py b/backend/app/utils/exceptions.py new file mode 100644 index 00000000..d21775d9 --- /dev/null +++ b/backend/app/utils/exceptions.py @@ -0,0 +1,499 @@ +""" +MiroFish Custom Exceptions + +This module provides a hierarchy of custom exceptions for better error handling, +debugging, and API responses throughout the MiroFish application. + +Usage: + from app.utils.exceptions import ( + MiroFishError, + OntologyGenerationError, + GraphBuildError, + ProfileGenerationError, + SimulationError, + ConfigurationError, + ValidationError, + ExternalAPIError, + ) +""" + +from typing import Optional, Dict, Any, List +from enum import Enum + + +class ErrorSeverity(str, Enum): + """Error severity levels for logging and monitoring""" + LOW = "low" # Minor issues, can be retried + MEDIUM = "medium" # Significant issues, may need attention + HIGH = "high" # Critical issues, immediate attention needed + CRITICAL = "critical" # System-level failures + + +class MiroFishError(Exception): + """ + Base exception class for all MiroFish errors. 
+ + Provides: + - Error code for programmatic handling + - Severity level for monitoring + - Optional details dictionary for context + - User-friendly message vs technical message separation + """ + + # Default values for subclasses + error_code: str = "MIROFISH_ERROR" + http_status: int = 500 + severity: ErrorSeverity = ErrorSeverity.MEDIUM + + def __init__( + self, + message: str, + details: Optional[Dict[str, Any]] = None, + user_message: Optional[str] = None, + cause: Optional[Exception] = None + ): + """ + Initialize a MiroFish error. + + Args: + message: Technical error message (for logs and debugging) + details: Additional context dictionary + user_message: User-friendly message (for API responses) + cause: Original exception that caused this error + """ + super().__init__(message) + self.message = message + self.details = details or {} + self.user_message = user_message or message + self.cause = cause + + def to_dict(self) -> Dict[str, Any]: + """Convert error to dictionary for API response""" + result = { + "error": True, + "error_code": self.error_code, + "message": self.user_message, + "severity": self.severity.value, + } + if self.details: + result["details"] = self.details + if self.cause: + result["cause"] = str(self.cause) + return result + + def __str__(self) -> str: + if self.details: + return f"[{self.error_code}] {self.message} - Details: {self.details}" + return f"[{self.error_code}] {self.message}" + + +# ============================================================ +# Configuration Errors +# ============================================================ + +class ConfigurationError(MiroFishError): + """Base class for configuration-related errors""" + error_code = "CONFIG_ERROR" + http_status = 500 + severity = ErrorSeverity.HIGH + + +class MissingConfigError(ConfigurationError): + """Required configuration is missing""" + error_code = "CONFIG_MISSING" + + def __init__(self, config_name: str, **kwargs): + # Remove any conflicting kwargs + 
kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Required configuration '{config_name}' is not set" + super().__init__( + message=message, + user_message=f"System configuration missing: {config_name}", + details={"config_name": config_name}, + **kwargs + ) + + +class InvalidConfigError(ConfigurationError): + """Configuration value is invalid""" + error_code = "CONFIG_INVALID" + + def __init__(self, config_name: str, value: Any, reason: str, **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Configuration '{config_name}' has invalid value: {reason}" + super().__init__( + message=message, + user_message=f"Configuration '{config_name}' is invalid: {reason}", + details={"config_name": config_name, "value": str(value), "reason": reason}, + **kwargs + ) + + +# ============================================================ +# Ontology Errors +# ============================================================ + +class OntologyError(MiroFishError): + """Base class for ontology-related errors""" + error_code = "ONTOLOGY_ERROR" + http_status = 500 + severity = ErrorSeverity.MEDIUM + + +class OntologyGenerationError(OntologyError): + """Failed to generate ontology from documents""" + error_code = "ONTOLOGY_GENERATION_FAILED" + + def __init__(self, message: str = "Failed to generate ontology", **kwargs): + kwargs.pop('user_message', None) + super().__init__( + message=message, + user_message="Ontology generation failed. 
Please check document content.", + **kwargs + ) + + +class OntologyValidationError(OntologyError): + """Generated ontology fails validation""" + error_code = "ONTOLOGY_VALIDATION_FAILED" + http_status = 400 + + def __init__(self, errors: List[str], **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Ontology validation failed: {errors}" + super().__init__( + message=message, + user_message="Generated ontology definition does not meet specifications", + details={"validation_errors": errors}, + **kwargs + ) + + +# ============================================================ +# Graph Errors +# ============================================================ + +class GraphError(MiroFishError): + """Base class for graph-related errors""" + error_code = "GRAPH_ERROR" + http_status = 500 + severity = ErrorSeverity.MEDIUM + + +class GraphBuildError(GraphError): + """Failed to build knowledge graph""" + error_code = "GRAPH_BUILD_FAILED" + + def __init__(self, graph_id: Optional[str] = None, **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Failed to build graph: {graph_id or 'unknown'}" + super().__init__( + message=message, + user_message="Graph build failed. Please try again later.", + details={"graph_id": graph_id}, + **kwargs + ) + + +class GraphConnectionError(GraphError): + """Failed to connect to graph database (Zep)""" + error_code = "GRAPH_CONNECTION_FAILED" + severity = ErrorSeverity.HIGH + + def __init__(self, **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + super().__init__( + message="Failed to connect to Zep graph database", + user_message="Cannot connect to graph database. 
Please check network connection.", + **kwargs + ) + + +class GraphQueryError(GraphError): + """Failed to query graph""" + error_code = "GRAPH_QUERY_FAILED" + + def __init__(self, query: str = "", **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Graph query failed: {query[:100]}..." + super().__init__( + message=message, + user_message="Graph query failed", + **kwargs + ) + + +# ============================================================ +# Profile/Agent Errors +# ============================================================ + +class ProfileError(MiroFishError): + """Base class for agent profile errors""" + error_code = "PROFILE_ERROR" + http_status = 500 + severity = ErrorSeverity.MEDIUM + + +class ProfileGenerationError(ProfileError): + """Failed to generate agent profile""" + error_code = "PROFILE_GENERATION_FAILED" + + def __init__(self, entity_name: str = "", **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Failed to generate profile for entity: {entity_name}" + super().__init__( + message=message, + user_message=f"Agent profile generation failed: {entity_name}", + details={"entity_name": entity_name}, + **kwargs + ) + + +class ProfileValidationError(ProfileError): + """Profile validation failed""" + error_code = "PROFILE_VALIDATION_FAILED" + http_status = 400 + + def __init__(self, entity_name: str, errors: List[str], **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Profile validation failed for {entity_name}: {errors}" + super().__init__( + message=message, + user_message=f"Agent profile validation failed: {entity_name}", + details={"entity_name": entity_name, "errors": errors}, + **kwargs + ) + + +# ============================================================ +# Simulation Errors +# ============================================================ + 
+class SimulationError(MiroFishError): + """Base class for simulation errors""" + error_code = "SIMULATION_ERROR" + http_status = 500 + severity = ErrorSeverity.MEDIUM + + +class SimulationInitError(SimulationError): + """Failed to initialize simulation""" + error_code = "SIMULATION_INIT_FAILED" + + def __init__(self, simulation_id: str = "", **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Failed to initialize simulation: {simulation_id}" + super().__init__( + message=message, + user_message="Simulation initialization failed", + details={"simulation_id": simulation_id}, + **kwargs + ) + + +class SimulationRunError(SimulationError): + """Error during simulation execution""" + error_code = "SIMULATION_RUN_FAILED" + + def __init__(self, simulation_id: str, round_num: int = 0, **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Simulation {simulation_id} failed at round {round_num}" + super().__init__( + message=message, + user_message="Error occurred during simulation execution", + details={"simulation_id": simulation_id, "round": round_num}, + **kwargs + ) + + +class IPCError(SimulationError): + """Inter-process communication error""" + error_code = "IPC_ERROR" + + def __init__(self, command: str = "", **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"IPC command failed: {command}" + super().__init__( + message=message, + user_message="Inter-process communication error", + details={"command": command}, + **kwargs + ) + + +# ============================================================ +# External API Errors +# ============================================================ + +class ExternalAPIError(MiroFishError): + """Base class for external API errors""" + error_code = "EXTERNAL_API_ERROR" + http_status = 502 + severity = ErrorSeverity.HIGH + + +class 
LLMError(ExternalAPIError): + """LLM API call failed""" + error_code = "LLM_ERROR" + + def __init__(self, provider: str = "", operation: str = "", **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"LLM API call failed: {provider} - {operation}" + super().__init__( + message=message, + user_message="AI model call failed. Please try again later.", + details={"provider": provider, "operation": operation}, + **kwargs + ) + + +class LLMRateLimitError(LLMError): + """LLM rate limit exceeded""" + error_code = "LLM_RATE_LIMIT" + http_status = 429 + + def __init__(self, **kwargs): + # Don't pass provider/operation to parent, handle directly + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + # Call grandparent directly + ExternalAPIError.__init__( + self, + message="LLM rate limit exceeded", + user_message="AI model rate limit exceeded. Please try again later.", + details={"rate_limited": True}, + **kwargs + ) + + +class ZepError(ExternalAPIError): + """Zep API call failed""" + error_code = "ZEP_ERROR" + + def __init__(self, operation: str = "", **kwargs): + kwargs.pop('message', None) + kwargs.pop('user_message', None) + kwargs.pop('details', None) + message = f"Zep API call failed: {operation}" + super().__init__( + message=message, + user_message="Graph service call failed", + details={"operation": operation}, + **kwargs + ) + + +# ============================================================ +# Validation Errors +# ============================================================ + +class ValidationError(MiroFishError): + """Base class for validation errors""" + error_code = "VALIDATION_ERROR" + http_status = 400 + severity = ErrorSeverity.LOW + + +class FileValidationError(ValidationError): + """Invalid file upload""" + error_code = "FILE_VALIDATION_ERROR" + + def __init__(self, filename: str, reason: str, **kwargs): + kwargs.pop('message', None) + 
kwargs.pop('user_message', None)
+        kwargs.pop('details', None)
+        message = f"File validation failed for '{filename}': {reason}"
+        super().__init__(
+            message=message,
+            user_message=f"File validation failed: {reason}",
+            details={"filename": filename, "reason": reason},
+            **kwargs
+        )
+
+
+class ParameterValidationError(ValidationError):
+    """Invalid request parameter"""
+    error_code = "PARAMETER_VALIDATION_ERROR"
+
+    def __init__(self, param_name: str, value: Any, expected: str, **kwargs):
+        kwargs.pop('message', None)
+        kwargs.pop('user_message', None)
+        kwargs.pop('details', None)
+        message = f"Invalid parameter '{param_name}': expected {expected}, got {type(value).__name__}"
+        super().__init__(
+            message=message,
+            user_message=f"Invalid parameter: {param_name}",
+            details={"param_name": param_name, "value": str(value), "expected": expected},
+            **kwargs
+        )
+
+
+# ============================================================
+# Not Found Errors
+# ============================================================
+
+class NotFoundError(MiroFishError):
+    """Resource not found"""
+    error_code = "NOT_FOUND"
+    http_status = 404
+    severity = ErrorSeverity.LOW
+
+    def __init__(self, resource_type: str, resource_id: str, **kwargs):
+        kwargs.pop('message', None)
+        kwargs.pop('user_message', None)
+        kwargs.pop('details', None)
+        message = f"{resource_type} not found: {resource_id}"
+        super().__init__(
+            message=message,
+            user_message=f"{resource_type} not found: {resource_id}",
+            details={"resource_type": resource_type, "resource_id": resource_id},
+            **kwargs
+        )
+
+
+class ProjectNotFoundError(NotFoundError):
+    """Project not found"""
+    error_code = "PROJECT_NOT_FOUND"
+
+    def __init__(self, project_id: str, **kwargs):
+        super().__init__("Project", project_id, **kwargs)
+
+
+class SimulationNotFoundError(NotFoundError):
+    """Simulation not found"""
+    error_code = "SIMULATION_NOT_FOUND"
+
+    def __init__(self, simulation_id: str, **kwargs):
+        super().__init__("Simulation", 
simulation_id, **kwargs) + + +class ReportNotFoundError(NotFoundError): + """Report not found""" + error_code = "REPORT_NOT_FOUND" + + def __init__(self, report_id: str, **kwargs): + super().__init__("Report", report_id, **kwargs) diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/backend/tests/test_config_validated.py b/backend/tests/test_config_validated.py new file mode 100644 index 00000000..33da8e12 --- /dev/null +++ b/backend/tests/test_config_validated.py @@ -0,0 +1,332 @@ +""" +Unit Tests for Configuration Validation + +Tests for the enhanced configuration system. +Run with: pytest backend/tests/test_config_validated.py -v +""" + +import pytest +import os +import sys +import tempfile +from pathlib import Path +from unittest.mock import patch, MagicMock + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + + + +from app.config import ( + Config, + ConfigValidationResult, + validate_on_startup, +) + + +class TestConfigValidationResult: + """Test ConfigValidationResult class""" + + def test_empty_result_is_valid(self): + """Empty result should be valid""" + result = ConfigValidationResult() + assert result.is_valid is True + assert len(result.errors) == 0 + assert len(result.warnings) == 0 + + def test_result_with_errors_is_invalid(self): + """Result with errors should be invalid""" + result = ConfigValidationResult() + result.add_error("Test error") + assert result.is_valid is False + assert len(result.errors) == 1 + + def test_result_with_warnings_is_valid(self): + """Result with only warnings should still be valid""" + result = ConfigValidationResult() + result.add_warning("Test warning") + assert result.is_valid is True + assert len(result.warnings) == 1 + + def test_to_dict(self): + """Test serialization""" + result = ConfigValidationResult() + result.add_error("Error 1") + result.add_warning("Warning 1") + result.add_info("Info 1") + + d = result.to_dict() + 
assert d["is_valid"] is False + assert d["error_count"] == 1 + assert d["warning_count"] == 1 + assert "Error 1" in d["errors"] + assert "Warning 1" in d["warnings"] + assert "Info 1" in d["info"] + + +class TestConfigValidation: + """Test configuration validation methods""" + + def test_validate_returns_list_for_backward_compatibility(self): + """validate() should return a list for backward compatibility""" + errors = Config.validate() + assert isinstance(errors, list) + + def test_validate_comprehensive_returns_result(self): + """validate_comprehensive() should return ConfigValidationResult""" + result = Config.validate_comprehensive() + assert isinstance(result, ConfigValidationResult) + + def test_missing_llm_api_key_detected(self): + """Should detect missing LLM_API_KEY""" + with patch.dict(os.environ, {'LLM_API_KEY': '', 'ZEP_API_KEY': 'test_key'}): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_llm_error = any('LLM_API_KEY' in e for e in result.errors) + assert has_llm_error, "Should detect missing LLM_API_KEY" + + def test_missing_zep_api_key_detected(self): + """Should detect missing ZEP_API_KEY""" + with patch.dict(os.environ, {'LLM_API_KEY': 'test_key', 'ZEP_API_KEY': ''}): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_zep_error = any('ZEP_API_KEY' in e for e in result.errors) + assert has_zep_error, "Should detect missing ZEP_API_KEY" + + def test_placeholder_api_key_detected(self): + """Should detect placeholder API keys""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'your_api_key_here', + 'ZEP_API_KEY': 'test_key' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_placeholder_error = any('示例值' in e or 
'placeholder' in e.lower() for e in result.errors) + assert has_placeholder_error, "Should detect placeholder API key" + + +class TestURLValidation: + """Test URL validation""" + + def test_valid_url_passes(self): + """Valid URLs should pass validation""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'LLM_BASE_URL': 'https://api.openai.com/v1' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + url_in_info = any('LLM_BASE_URL' in i for i in result.info) + assert url_in_info or len(result.errors) == 0 + + def test_invalid_url_scheme_detected(self): + """Invalid URL scheme should be detected""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'LLM_BASE_URL': 'ftp://invalid.com' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_url_error = any('LLM_BASE_URL' in e and ('协议' in e or 'scheme' in e.lower()) for e in result.errors) + assert has_url_error, "Should detect invalid URL scheme" + + def test_url_without_scheme_detected(self): + """URL without scheme should be detected""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'LLM_BASE_URL': 'api.openai.com/v1' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_url_error = any('LLM_BASE_URL' in e for e in result.errors) + assert has_url_error, "Should detect URL without scheme" + + +class TestNumericValidation: + """Test numeric range validation""" + + def test_zero_max_rounds_detected(self): + """Zero max rounds should be detected""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 
'test_key_12345', + 'OASIS_DEFAULT_MAX_ROUNDS': '0' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_rounds_error = any('OASIS_DEFAULT_MAX_ROUNDS' in e for e in result.errors) + assert has_rounds_error, "Should detect zero max rounds" + + def test_negative_max_rounds_detected(self): + """Negative max rounds should be detected""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'OASIS_DEFAULT_MAX_ROUNDS': '-10' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_rounds_error = any('OASIS_DEFAULT_MAX_ROUNDS' in e for e in result.errors) + assert has_rounds_error, "Should detect negative max rounds" + + def test_large_max_rounds_warns(self): + """Large max rounds should trigger warning""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'OASIS_DEFAULT_MAX_ROUNDS': '2000' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_rounds_warning = any('OASIS_DEFAULT_MAX_ROUNDS' in w for w in result.warnings) + assert has_rounds_warning, "Should warn about large max rounds" + + +class TestDirectoryValidation: + """Test directory validation""" + + def test_directory_created_if_not_exists(self): + """Should create directory if it doesn't exist""" + with tempfile.TemporaryDirectory() as tmpdir: + upload_dir = os.path.join(tmpdir, 'new_uploads') + + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + }): + with patch.object(Config, 'UPLOAD_FOLDER', upload_dir): + result = Config.validate_comprehensive() + + + + +class TestConfigSummary: + """Test configuration summary""" + + def 
test_get_config_summary(self): + """Should return configuration summary without sensitive data""" + summary = Config.get_config_summary() + + assert 'llm' in summary + assert 'zep' in summary + assert 'simulation' in summary + assert 'debug' in summary + + summary_str = str(summary) + assert 'api_key' not in summary_str.lower() + assert 'secret' not in summary_str.lower() + + +class TestStartupValidation: + """Test startup validation function""" + + def test_validate_on_startup_returns_bool(self): + """validate_on_startup should return boolean""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = validate_on_startup() + assert isinstance(result, bool) + + +class TestDebugMode: + """Test debug mode detection""" + + def test_debug_mode_warning(self): + """Debug mode should trigger warning""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'FLASK_DEBUG': 'True' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_debug_warning = any('DEBUG' in w or 'debug' in w for w in result.warnings) + assert has_debug_warning, "Should warn about debug mode" + + +class TestConfigConsistency: + """Test configuration consistency checks""" + + def test_valid_model_name_passes(self): + """Valid model names should pass""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'LLM_MODEL_NAME': 'gpt-4o-mini' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_model_info = any('LLM_MODEL_NAME' in i for i in result.info) + assert has_model_info, "Should show valid model name in info" + + def 
test_unusual_model_name_warns(self): + """Unusual model names should trigger warning""" + with patch.dict(os.environ, { + 'LLM_API_KEY': 'test_key_12345', + 'ZEP_API_KEY': 'test_key_12345', + 'LLM_MODEL_NAME': 'UNUSUAL_MODEL_FORMAT' + }): + from importlib import reload + import app.config as config_module + reload(config_module) + + result = config_module.Config.validate_comprehensive() + + has_model_warning = any('LLM_MODEL_NAME' in w for w in result.warnings) + assert has_model_warning, "Should warn about unusual model name format" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) \ No newline at end of file diff --git a/backend/tests/test_exceptions.py b/backend/tests/test_exceptions.py new file mode 100644 index 00000000..777b8057 --- /dev/null +++ b/backend/tests/test_exceptions.py @@ -0,0 +1,323 @@ +""" +Unit Tests for Custom Exceptions + +Tests for the MiroFish exception hierarchy. +Run with: pytest backend/tests/test_exceptions.py -v +""" + +import pytest +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) + +from app.utils.exceptions import ( + MiroFishError, + ErrorSeverity, + # Configuration + ConfigurationError, + MissingConfigError, + InvalidConfigError, + # Ontology + OntologyError, + OntologyGenerationError, + OntologyValidationError, + # Graph + GraphError, + GraphBuildError, + GraphConnectionError, + GraphQueryError, + # Profile + ProfileError, + ProfileGenerationError, + ProfileValidationError, + # Simulation + SimulationError, + SimulationInitError, + SimulationRunError, + IPCError, + # External API + ExternalAPIError, + LLMError, + LLMRateLimitError, + ZepError, + # Validation + ValidationError, + FileValidationError, + ParameterValidationError, + # Not Found + NotFoundError, + ProjectNotFoundError, + SimulationNotFoundError, + ReportNotFoundError, +) + + +class TestMiroFishErrorBase: + """Test the base MiroFishError class""" + + def test_basic_error_creation(self): + """Test basic error 
creation""" + error = MiroFishError("Test error message") + assert error.message == "Test error message" + assert error.user_message == "Test error message" + assert error.details == {} + assert error.cause is None + + def test_error_with_all_params(self): + """Test error with all parameters""" + original_error = ValueError("original") + error = MiroFishError( + message="Technical message", + details={"key": "value"}, + user_message="User friendly message", + cause=original_error + ) + assert error.message == "Technical message" + assert error.user_message == "User friendly message" + assert error.details == {"key": "value"} + assert error.cause == original_error + + def test_to_dict(self): + """Test error serialization""" + error = MiroFishError( + message="Test", + details={"foo": "bar"}, + user_message="User message" + ) + result = error.to_dict() + + assert result["error"] == True + assert result["error_code"] == "MIROFISH_ERROR" + assert result["message"] == "User message" + assert result["severity"] == "medium" + assert result["details"] == {"foo": "bar"} + + def test_str_representation(self): + """Test string representation""" + error = MiroFishError("Test message", details={"key": "value"}) + string_repr = str(error) + + assert "[MIROFISH_ERROR]" in string_repr + assert "Test message" in string_repr + assert "key" in string_repr + + +class TestConfigurationErrors: + """Test configuration-related errors""" + + def test_missing_config_error(self): + """Test MissingConfigError""" + error = MissingConfigError("LLM_API_KEY") + + assert error.error_code == "CONFIG_MISSING" + assert error.http_status == 500 + assert error.severity == ErrorSeverity.HIGH + assert "LLM_API_KEY" in error.message + assert "LLM_API_KEY" in error.details["config_name"] + + def test_invalid_config_error(self): + """Test InvalidConfigError""" + error = InvalidConfigError( + config_name="PORT", + value="abc", + reason="must be a number" + ) + + assert error.error_code == "CONFIG_INVALID" + 
assert "PORT" in error.message + assert "must be a number" in error.message + + +class TestOntologyErrors: + """Test ontology-related errors""" + + def test_ontology_generation_error(self): + """Test OntologyGenerationError""" + error = OntologyGenerationError() + + assert error.error_code == "ONTOLOGY_GENERATION_FAILED" + assert error.http_status == 500 + + def test_ontology_validation_error(self): + """Test OntologyValidationError""" + errors = ["Missing entity type", "Invalid edge definition"] + error = OntologyValidationError(errors) + + assert error.error_code == "ONTOLOGY_VALIDATION_FAILED" + assert error.http_status == 400 + assert error.details["validation_errors"] == errors + + +class TestGraphErrors: + """Test graph-related errors""" + + def test_graph_build_error(self): + """Test GraphBuildError""" + error = GraphBuildError(graph_id="graph_123") + + assert error.error_code == "GRAPH_BUILD_FAILED" + assert "graph_123" in error.message + assert error.details["graph_id"] == "graph_123" + + def test_graph_connection_error(self): + """Test GraphConnectionError""" + error = GraphConnectionError() + + assert error.error_code == "GRAPH_CONNECTION_FAILED" + assert error.severity == ErrorSeverity.HIGH + + def test_graph_query_error(self): + """Test GraphQueryError""" + error = GraphQueryError(query="MATCH (n) RETURN n") + + assert error.error_code == "GRAPH_QUERY_FAILED" + + +class TestProfileErrors: + """Test profile-related errors""" + + def test_profile_generation_error(self): + """Test ProfileGenerationError""" + error = ProfileGenerationError(entity_name="John Doe") + + assert error.error_code == "PROFILE_GENERATION_FAILED" + assert "John Doe" in error.message + assert error.details["entity_name"] == "John Doe" + + def test_profile_validation_error(self): + """Test ProfileValidationError""" + errors = ["bio is required", "age must be positive"] + error = ProfileValidationError("Agent1", errors) + + assert error.error_code == "PROFILE_VALIDATION_FAILED" + 
assert error.http_status == 400 + assert error.details["errors"] == errors + + +class TestSimulationErrors: + """Test simulation-related errors""" + + def test_simulation_init_error(self): + """Test SimulationInitError""" + error = SimulationInitError(simulation_id="sim_abc123") + + assert error.error_code == "SIMULATION_INIT_FAILED" + assert "sim_abc123" in error.message + + def test_simulation_run_error(self): + """Test SimulationRunError""" + error = SimulationRunError(simulation_id="sim_123", round_num=15) + + assert error.error_code == "SIMULATION_RUN_FAILED" + assert error.details["round"] == 15 + + def test_ipc_error(self): + """Test IPCError""" + error = IPCError(command="interview_agent") + + assert error.error_code == "IPC_ERROR" + + +class TestExternalAPIErrors: + """Test external API errors""" + + def test_llm_error(self): + """Test LLMError""" + error = LLMError(provider="OpenAI", operation="chat_completion") + + assert error.error_code == "LLM_ERROR" + assert error.http_status == 502 + assert error.severity == ErrorSeverity.HIGH + assert error.details["provider"] == "OpenAI" + + def test_llm_rate_limit_error(self): + """Test LLMRateLimitError""" + error = LLMRateLimitError() + + assert error.error_code == "LLM_RATE_LIMIT" + assert error.http_status == 429 + + def test_zep_error(self): + """Test ZepError""" + error = ZepError(operation="search_graph") + + assert error.error_code == "ZEP_ERROR" + + +class TestValidationErrors: + """Test validation errors""" + + def test_file_validation_error(self): + """Test FileValidationError""" + error = FileValidationError("test.pdf", "file too large") + + assert error.error_code == "FILE_VALIDATION_ERROR" + assert error.http_status == 400 + assert error.severity == ErrorSeverity.LOW + + def test_parameter_validation_error(self): + """Test ParameterValidationError""" + error = ParameterValidationError("rounds", "abc", "integer") + + assert error.error_code == "PARAMETER_VALIDATION_ERROR" + assert 
error.details["param_name"] == "rounds" + + +class TestNotFoundErrors: + """Test not found errors""" + + def test_project_not_found(self): + """Test ProjectNotFoundError""" + error = ProjectNotFoundError("proj_123") + + assert error.error_code == "PROJECT_NOT_FOUND" + assert error.http_status == 404 + assert error.details["resource_type"] == "Project" + + def test_simulation_not_found(self): + """Test SimulationNotFoundError""" + error = SimulationNotFoundError("sim_456") + + assert error.error_code == "SIMULATION_NOT_FOUND" + + def test_report_not_found(self): + """Test ReportNotFoundError""" + error = ReportNotFoundError("report_789") + + assert error.error_code == "REPORT_NOT_FOUND" + + +class TestErrorInheritance: + """Test error inheritance hierarchy""" + + def test_ontology_error_inherits_from_base(self): + """Verify ontology errors inherit from MiroFishError""" + error = OntologyGenerationError() + assert isinstance(error, MiroFishError) + assert isinstance(error, OntologyError) + + def test_graph_error_inherits_from_base(self): + """Verify graph errors inherit from MiroFishError""" + error = GraphBuildError() + assert isinstance(error, MiroFishError) + assert isinstance(error, GraphError) + + def test_all_errors_can_be_caught_by_base(self): + """Verify all errors can be caught by MiroFishError""" + errors_to_test = [ + MissingConfigError("test"), + OntologyGenerationError(), + GraphBuildError(), + ProfileGenerationError(), + SimulationRunError("sim", 1), + LLMError(), + ProjectNotFoundError("test"), + ] + + for error in errors_to_test: + assert isinstance(error, MiroFishError), f"{type(error).__name__} should inherit from MiroFishError" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"])