diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..345406a --- /dev/null +++ b/.env.example @@ -0,0 +1,13 @@ +# Environment variables for lippytm ChatGPT.AI +# Copy this file to .env and set your values + +# OpenAI API Configuration +OPENAI_API_KEY=your_openai_api_key_here + +# Alternative environment variable names +LIPPYTM_OPENAI_API_KEY=your_openai_api_key_here +LIPPYTM_BACKEND=openai +LIPPYTM_MODEL=gpt-3.5-turbo +LIPPYTM_MAX_TOKENS=1000 +LIPPYTM_TEMPERATURE=0.7 +LIPPYTM_LOG_LEVEL=INFO \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9696afa --- /dev/null +++ b/.gitignore @@ -0,0 +1,68 @@ +# Configuration files +*.env +.env.local +config.yaml +config.yml + +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +*.manifest +*.spec + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Virtual environments +venv/ +env/ +ENV/ + +# IDEs +.vscode/ +.idea/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db + +# Logs +*.log +logs/ + +# Temporary files +*.tmp +*.temp +/tmp/ \ No newline at end of file diff --git a/README.md b/README.md index 7053aa1..7b0d15c 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,259 @@ # AI-Time-Machines -adding AI Agents to everything with Time Machines + +Adding AI Agents to everything with Time Machines + +## lippytm ChatGPT.AI + +A modular, extensible ChatGPT-like AI bot with time machine capabilities. This intelligent assistant can help users with various tasks, provide insightful responses, and engage in meaningful conversations while leveraging knowledge from different time periods. 
+ +### Features + +- **Multi-Backend Support**: Works with OpenAI API, local Transformers models, or echo mode +- **Modular Architecture**: Extensible design for easy enhancement and customization +- **Interactive CLI**: Beautiful command-line interface with colored output +- **Configuration Management**: Flexible configuration via files or environment variables +- **Conversation Management**: Save, clear, and export conversation history +- **Time Machine Capabilities**: Historical context and knowledge integration +- **Comprehensive Logging**: Detailed logging for debugging and monitoring + +### Quick Start + +1. **Installation**: + ```bash + git clone https://github.com/lippytm/AI-Time-Machines.git + cd AI-Time-Machines + pip install -r requirements.txt + pip install -e . + ``` + +2. **Configuration**: + ```bash + # Copy example configuration + cp config.example.yaml config.yaml + cp .env.example .env + + # Edit config.yaml or .env with your API keys and preferences + ``` + +3. **Run the Bot**: + ```bash + # Interactive mode (default) + lippytm-chatgpt + + # Single message + lippytm-chatgpt --message "Hello, how are you?" + + # Specify backend + lippytm-chatgpt --backend openai --model gpt-4 + ``` + +### Configuration + +#### Configuration File (config.yaml) + +```yaml +backend: "openai" +openai_api_key: "your-api-key" +openai_model: "gpt-3.5-turbo" +max_tokens: 1000 +temperature: 0.7 +system_prompt: "You are lippytm ChatGPT.AI..." +``` + +#### Environment Variables + +```bash +export OPENAI_API_KEY="your-api-key" +export LIPPYTM_BACKEND="openai" +export LIPPYTM_MODEL="gpt-3.5-turbo" +``` + +### Usage Examples + +#### Interactive Chat +```bash +$ lippytm-chatgpt +╔══════════════════════════════════════════════════╗ +║ lippytm ChatGPT.AI ║ +║ Intelligent AI with Time Machines ║ +╚══════════════════════════════════════════════════╝ + +You: Hello! What can you help me with? +AI: Hello! 
I'm lippytm ChatGPT.AI, your intelligent assistant with time machine capabilities... + +You: /help +Available commands: + help - Show this help message + clear - Clear conversation history + config - Show current configuration + summary - Show conversation summary + save - Save conversation to file + quit/exit - Exit the application +``` + +#### Command Line Options +```bash +# Show help +lippytm-chatgpt --help + +# Use specific backend +lippytm-chatgpt --backend transformers + +# Single message with verbose output +lippytm-chatgpt --message "Explain quantum computing" --verbose + +# Use custom config file +lippytm-chatgpt --config /path/to/config.yaml +``` + +### Architecture + +``` +lippytm_chatgpt/ +├── __init__.py # Package initialization +├── cli.py # Command-line interface +├── core/ # Core AI functionality +│ └── __init__.py # ChatGPT AI implementation +├── config/ # Configuration management +│ └── __init__.py # ConfigManager class +├── ui/ # User interface components +└── utils/ # Utility functions + └── __init__.py # Helper functions +``` + +### Supported Backends + +#### OpenAI Backend +- Uses OpenAI's GPT models (GPT-3.5, GPT-4) +- Requires OpenAI API key +- Best performance and capabilities + +#### Transformers Backend +- Uses local Hugging Face models +- No API key required +- Works offline +- Examples: DialoGPT, BlenderBot + +#### Echo Backend +- Simple testing/demo mode +- No external dependencies +- Returns formatted echo responses + +### API Reference + +#### ChatGPTAI Class + +```python +from lippytm_chatgpt.core import ChatGPTAI +from lippytm_chatgpt.config import ConfigManager + +# Initialize +config_manager = ConfigManager() +ai_bot = ChatGPTAI(config_manager.get_config_dict()) + +# Generate response +response = ai_bot.generate_response("Hello!") + +# Get conversation summary +summary = ai_bot.get_conversation_summary() + +# Clear conversation +ai_bot.clear_conversation() +``` + +#### ConfigManager Class + +```python +from 
lippytm_chatgpt.config import ConfigManager + +# Initialize with custom config +config = ConfigManager("path/to/config.yaml") + +# Get configuration values +backend = config.get("backend") +model = config.get("openai_model") + +# Set configuration values +config.set("temperature", 0.8) + +# Save configuration +config.save_config("new_config.yaml") +``` + +### Advanced Features + +#### Custom System Prompts +```yaml +system_prompt: | + You are a specialized assistant for software development. + Focus on providing accurate, well-documented code examples + and best practices for modern development. +``` + +#### Conversation Export +```bash +# In interactive mode, use the 'save' command +You: /save +Conversation saved to conversation_20231201_143022.txt +``` + +#### Multiple Configurations +```bash +# Development config +lippytm-chatgpt --config configs/dev.yaml + +# Production config +lippytm-chatgpt --config configs/prod.yaml +``` + +### Development + +#### Project Structure +- **Core Module**: Main AI logic and conversation management +- **Config Module**: Configuration loading and management +- **CLI Module**: Command-line interface and interactive chat +- **Utils Module**: Helper functions and utilities + +#### Adding New Backends +1. Extend the `ChatGPTAI` class in `core/__init__.py` +2. Add backend-specific configuration options +3. Implement the generation method for your backend +4. Update documentation + +#### Running Tests +```bash +# Install development dependencies +pip install -e ".[dev]" + +# Run tests (if test infrastructure exists) +python -m pytest tests/ +``` + +### Contributing + +1. Fork the repository +2. Create a feature branch (`git checkout -b feature/amazing-feature`) +3. Commit your changes (`git commit -m 'Add amazing feature'`) +4. Push to the branch (`git push origin feature/amazing-feature`) +5. 
Open a Pull Request + +### License + +This project is licensed under the GNU General Public License v3.0 - see the [LICENSE](LICENSE) file for details. + +### Support + +For support, issues, or questions: +- Create an issue on GitHub +- Check the documentation +- Review configuration examples + +### Changelog + +#### v1.0.0 +- Initial release +- Multi-backend support (OpenAI, Transformers, Echo) +- Interactive CLI interface +- Configuration management system +- Conversation history and export +- Comprehensive documentation diff --git a/config.example.yaml b/config.example.yaml new file mode 100644 index 0000000..8696214 --- /dev/null +++ b/config.example.yaml @@ -0,0 +1,35 @@ +# Example configuration for lippytm ChatGPT.AI +# Copy this file to config.yaml and customize as needed + +# AI Backend Configuration +backend: "openai" # Options: "openai", "transformers", "echo" + +# OpenAI Configuration (if using OpenAI backend) +openai_api_key: "" # Set your OpenAI API key here or use OPENAI_API_KEY env var +openai_model: "gpt-3.5-turbo" # Options: gpt-3.5-turbo, gpt-4, etc. + +# Local Model Configuration (if using transformers backend) +local_model: "microsoft/DialoGPT-medium" # Hugging Face model name + +# Generation Parameters +max_tokens: 1000 +temperature: 0.7 +max_conversation_history: 50 + +# System Prompt +system_prompt: | + You are lippytm ChatGPT.AI, an intelligent assistant with time machine capabilities. + You can help users with various tasks, provide insightful responses, and engage in + meaningful conversations. You have access to knowledge from different time periods + and can provide historical context when relevant. Be helpful, accurate, and engaging. 
+ +# Logging Configuration +logging: + level: "INFO" # Options: DEBUG, INFO, WARNING, ERROR + format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + +# UI Configuration +ui: + prompt_style: "modern" + show_timestamps: true + colorize_output: true \ No newline at end of file diff --git a/demo.py b/demo.py new file mode 100755 index 0000000..f8e20d9 --- /dev/null +++ b/demo.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +""" +Demonstration script for lippytm ChatGPT.AI +Shows various features and capabilities of the bot +""" + +import sys +import os +import time + +# Add project to path +project_root = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, project_root) + +from lippytm_chatgpt.config import ConfigManager +from lippytm_chatgpt.core import ChatGPTAI + +try: + from colorama import init as colorama_init, Fore, Style + colorama_init() + HAS_COLOR = True +except ImportError: + class Fore: + RED = GREEN = YELLOW = BLUE = MAGENTA = CYAN = WHITE = RESET = "" + class Style: + BRIGHT = DIM = RESET_ALL = "" + HAS_COLOR = False + + +def print_colored(text, color="", style=""): + """Print colored text if available""" + if HAS_COLOR: + print(f"{style}{color}{text}{Style.RESET_ALL}") + else: + print(text) + + +def print_section(title): + """Print a section header""" + print("\n" + "="*60) + print_colored(f" {title}", Fore.CYAN, Style.BRIGHT) + print("="*60) + + +def demo_basic_conversation(): + """Demonstrate basic conversation""" + print_section("Basic Conversation Demo") + + config_manager = ConfigManager() + ai_bot = ChatGPTAI(config_manager.get_config_dict()) + + print_colored(f"Backend: {ai_bot.ai_backend}", Fore.YELLOW) + print() + + demo_messages = [ + "Hello! What is lippytm ChatGPT.AI?", + "Tell me about time machines in science fiction", + "What year was the first computer invented?", + "How might AI change in the future?" 
+ ] + + for msg in demo_messages: + print_colored(f"User: {msg}", Fore.GREEN) + response = ai_bot.generate_response(msg) + print_colored(f"AI: {response}", Fore.BLUE) + print() + time.sleep(1) # Brief pause for readability + + return ai_bot + + +def demo_conversation_management(ai_bot): + """Demonstrate conversation management features""" + print_section("Conversation Management Demo") + + # Show conversation summary + summary = ai_bot.get_conversation_summary() + print_colored("Conversation Summary:", Fore.MAGENTA, Style.BRIGHT) + for key, value in summary.items(): + print(f" {key}: {value}") + print() + + # Add a few more messages + ai_bot.generate_response("Save this conversation for later") + ai_bot.generate_response("Clear history when done") + + # Show updated summary + summary = ai_bot.get_conversation_summary() + print_colored(f"Updated: {summary['total_messages']} total messages", Fore.YELLOW) + print() + + +def demo_configuration(): + """Demonstrate configuration features""" + print_section("Configuration Demo") + + config_manager = ConfigManager() + + print_colored("Current Configuration:", Fore.CYAN, Style.BRIGHT) + config = config_manager.get_config_dict() + + important_settings = [ + 'backend', 'openai_model', 'max_tokens', 'temperature', + 'max_conversation_history', 'system_prompt' + ] + + for setting in important_settings: + value = config.get(setting, "Not set") + if setting == 'system_prompt' and len(str(value)) > 100: + value = str(value)[:100] + "..." 
+ print(f" {setting}: {value}") + print() + + # Demonstrate dynamic configuration + print_colored("Dynamic Configuration Changes:", Fore.YELLOW, Style.BRIGHT) + config_manager.set('temperature', 0.9) + config_manager.set('max_tokens', 500) + print(" ✓ Updated temperature to 0.9") + print(" ✓ Updated max_tokens to 500") + print() + + +def demo_time_machine_features(): + """Demonstrate time machine themed features""" + print_section("Time Machine Features Demo") + + # Create config with time machine system prompt + time_machine_config = { + 'backend': 'echo', + 'system_prompt': ( + "You are lippytm ChatGPT.AI, an AI with time machine capabilities. " + "You can discuss historical events, predict future trends, and provide " + "perspectives from different time periods. You have knowledge spanning " + "from ancient history to speculative future scenarios." + ), + 'max_tokens': 1000, + 'temperature': 0.8 + } + + time_bot = ChatGPTAI(time_machine_config) + + time_messages = [ + "Take me to ancient Rome. What would daily life be like?", + "Fast forward to the year 2050. How might cities look?", + "What was the most important invention of the 20th century?", + "If you could prevent one historical disaster, which would it be?" 
+ ] + + for msg in time_messages: + print_colored(f"Time Traveler: {msg}", Fore.MAGENTA) + response = time_bot.generate_response(msg) + print_colored(f"Time Machine AI: {response}", Fore.CYAN) + print() + time.sleep(1) + + +def demo_error_handling(): + """Demonstrate error handling""" + print_section("Error Handling Demo") + + config_manager = ConfigManager() + ai_bot = ChatGPTAI(config_manager.get_config_dict()) + + print_colored("Testing error handling with various scenarios:", Fore.YELLOW) + + # Test with empty input + try: + response = ai_bot.generate_response("") + print(f" ✓ Empty input handled: '{response[:50]}...'") + except Exception as e: + print(f" ✗ Empty input error: {e}") + + # Test with very long input + try: + long_input = "This is a very long message. " * 100 + response = ai_bot.generate_response(long_input) + print(f" ✓ Long input handled: '{response[:50]}...'") + except Exception as e: + print(f" ✗ Long input error: {e}") + + print() + + +def demo_utilities(): + """Demonstrate utility functions""" + print_section("Utility Functions Demo") + + from lippytm_chatgpt.utils import ( + format_message, get_timestamp, estimate_tokens, + validate_api_key, extract_code_blocks + ) + + # Test message formatting + long_text = "This is a very long message that should be wrapped properly to demonstrate the formatting utility function working correctly." 
+ formatted = format_message(long_text, max_width=40) + print_colored("Message Formatting:", Fore.GREEN, Style.BRIGHT) + print(f"Original: {long_text}") + print(f"Formatted:\n{formatted}") + print() + + # Test timestamp + timestamp = get_timestamp() + print_colored(f"Current timestamp: {timestamp}", Fore.YELLOW) + + # Test token estimation + tokens = estimate_tokens(long_text) + print_colored(f"Estimated tokens: {tokens}", Fore.BLUE) + + # Test API key validation + test_keys = ["sk-1234567890abcdef1234567890", "invalid", ""] + for key in test_keys: + valid = validate_api_key(key) + print(f"Key '{key[:10]}...': {'✓' if valid else '✗'}") + + print() + + +def main(): + """Run the complete demonstration""" + print_colored("🤖 lippytm ChatGPT.AI - Comprehensive Demonstration", Fore.WHITE, Style.BRIGHT) + print_colored("=" * 60, Fore.WHITE) + + try: + # Run all demonstrations + ai_bot = demo_basic_conversation() + demo_conversation_management(ai_bot) + demo_configuration() + demo_time_machine_features() + demo_error_handling() + demo_utilities() + + print_section("Demonstration Complete") + print_colored("✨ All features demonstrated successfully!", Fore.GREEN, Style.BRIGHT) + print_colored("🚀 lippytm ChatGPT.AI is ready for use!", Fore.CYAN, Style.BRIGHT) + + print("\nNext steps:") + print("1. Configure your API keys in config.yaml or .env") + print("2. Run: ./lippytm-chatgpt --backend openai") + print("3. 
Start chatting with your AI assistant!") + + except Exception as e: + print_colored(f"❌ Demonstration failed: {e}", Fore.RED, Style.BRIGHT) + return 1 + + return 0 + + +if __name__ == '__main__': + sys.exit(main()) \ No newline at end of file diff --git a/lippytm-chatgpt b/lippytm-chatgpt new file mode 100755 index 0000000..08a3216 --- /dev/null +++ b/lippytm-chatgpt @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +"""Entry point script for lippytm ChatGPT.AI""" + +import sys +import os + +# Add the project root to Python path for development +project_root = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, project_root) + +from lippytm_chatgpt.cli import main + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/lippytm_chatgpt/__init__.py b/lippytm_chatgpt/__init__.py new file mode 100644 index 0000000..cf0fe4d --- /dev/null +++ b/lippytm_chatgpt/__init__.py @@ -0,0 +1,7 @@ +""" +lippytm ChatGPT.AI - A modular, extensible ChatGPT-like AI bot +""" + +__version__ = "1.0.0" +__author__ = "lippytm" +__description__ = "A ChatGPT-like AI bot with time machine capabilities" \ No newline at end of file diff --git a/lippytm_chatgpt/cli.py b/lippytm_chatgpt/cli.py new file mode 100644 index 0000000..8c0baa1 --- /dev/null +++ b/lippytm_chatgpt/cli.py @@ -0,0 +1,272 @@ +"""Command Line Interface for lippytm ChatGPT.AI""" + +import click +import logging +import sys +from typing import Optional +from pathlib import Path + +try: + from colorama import init as colorama_init, Fore, Style + COLORAMA_AVAILABLE = True + colorama_init() +except ImportError: + COLORAMA_AVAILABLE = False + # Fallback for no colorama + class Fore: + RED = GREEN = YELLOW = BLUE = MAGENTA = CYAN = WHITE = RESET = "" + class Style: + BRIGHT = DIM = RESET_ALL = "" + +from lippytm_chatgpt.core import ChatGPTAI +from lippytm_chatgpt.config import ConfigManager +from lippytm_chatgpt.utils import format_message, get_timestamp + + +class ChatInterface: + """Interactive chat 
interface for the AI bot""" + + def __init__(self, config_manager: ConfigManager): + """Initialize chat interface + + Args: + config_manager: Configuration manager instance + """ + self.config_manager = config_manager + self.config = config_manager.get_config_dict() + self.ai_bot = ChatGPTAI(self.config) + self.logger = logging.getLogger(__name__) + + # UI settings + self.colorize = self.config.get('ui', {}).get('colorize_output', True) and COLORAMA_AVAILABLE + self.show_timestamps = self.config.get('ui', {}).get('show_timestamps', True) + + def print_colored(self, text: str, color: str = "", style: str = ""): + """Print colored text if colorama is available""" + if self.colorize: + print(f"{style}{color}{text}{Style.RESET_ALL}") + else: + print(text) + + def print_welcome(self): + """Print welcome message""" + welcome_text = f""" +╔══════════════════════════════════════════════════╗ +║ lippytm ChatGPT.AI ║ +║ Intelligent AI with Time Machines ║ +╚══════════════════════════════════════════════════╝ + +Backend: {self.ai_bot.ai_backend} +Model: {self.config.get('openai_model', 'N/A')} + +Type 'help' for commands or 'quit' to exit. + """ + self.print_colored(welcome_text, Fore.CYAN, Style.BRIGHT) + + def print_help(self): + """Print help information""" + help_text = """ +Available commands: + help - Show this help message + clear - Clear conversation history + config - Show current configuration + summary - Show conversation summary + save - Save conversation to file + quit/exit - Exit the application + +Just type your message to chat with the AI! + """ + self.print_colored(help_text, Fore.YELLOW) + + def handle_command(self, command: str) -> bool: + """Handle special commands + + Args: + command: Command string + + Returns: + True if should continue, False if should exit + """ + command = command.lower().strip() + + if command in ['quit', 'exit']: + self.print_colored("Goodbye! 
Thanks for using lippytm ChatGPT.AI", Fore.GREEN) + return False + + elif command == 'help': + self.print_help() + + elif command == 'clear': + self.ai_bot.clear_conversation() + self.print_colored("Conversation history cleared.", Fore.GREEN) + + elif command == 'config': + self.show_config() + + elif command == 'summary': + self.show_summary() + + elif command == 'save': + self.save_conversation() + + else: + self.print_colored(f"Unknown command: {command}. Type 'help' for available commands.", Fore.RED) + + return True + + def show_config(self): + """Show current configuration""" + config_info = f""" +Current Configuration: + Backend: {self.ai_bot.ai_backend} + Model: {self.config.get('openai_model', 'N/A')} + Max Tokens: {self.config.get('max_tokens', 'N/A')} + Temperature: {self.config.get('temperature', 'N/A')} + History Length: {self.config.get('max_conversation_history', 'N/A')} + """ + self.print_colored(config_info, Fore.BLUE) + + def show_summary(self): + """Show conversation summary""" + summary = self.ai_bot.get_conversation_summary() + summary_text = f""" +Conversation Summary: + Total Messages: {summary['total_messages']} + User Messages: {summary['user_messages']} + Assistant Messages: {summary['assistant_messages']} + Backend: {summary['backend']} + Start Time: {summary['start_time'] or 'N/A'} + Last Activity: {summary['last_activity'] or 'N/A'} + """ + self.print_colored(summary_text, Fore.MAGENTA) + + def save_conversation(self): + """Save conversation to file""" + if not self.ai_bot.conversation_history: + self.print_colored("No conversation to save.", Fore.YELLOW) + return + + filename = f"conversation_{get_timestamp()}.txt" + try: + with open(filename, 'w', encoding='utf-8') as f: + f.write("lippytm ChatGPT.AI Conversation Log\n") + f.write("=" * 40 + "\n\n") + + for msg in self.ai_bot.conversation_history: + timestamp = msg.timestamp.strftime("%Y-%m-%d %H:%M:%S") + f.write(f"[{timestamp}] {msg.role.upper()}: {msg.content}\n\n") + + 
self.print_colored(f"Conversation saved to {filename}", Fore.GREEN)
backend: Optional[str], model: Optional[str], + interactive: bool, message: Optional[str], verbose: bool): + """lippytm ChatGPT.AI - Intelligent AI bot with time machine capabilities""" + + # Setup logging + if verbose: + logging.basicConfig(level=logging.DEBUG) + + try: + # Initialize configuration + config_manager = ConfigManager(config) + + # Override config with CLI options + if backend: + config_manager.set('backend', backend) + if model: + config_manager.set('openai_model', model) + + # Validate configuration + if not config_manager.validate_config(): + click.echo("Configuration validation failed. Please check your settings.", err=True) + sys.exit(1) + + # Create chat interface + chat_interface = ChatInterface(config_manager) + + if message: + # Single message mode + try: + response = chat_interface.ai_bot.generate_response(message) + click.echo(f"AI: {response}") + except Exception as e: + click.echo(f"Error: {e}", err=True) + sys.exit(1) + + elif interactive: + # Interactive mode + chat_interface.run_interactive() + + else: + click.echo("Please specify either --interactive or --message") + sys.exit(1) + + except Exception as e: + if verbose: + logging.exception("Application error") + click.echo(f"Application error: {e}", err=True) + sys.exit(1) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/lippytm_chatgpt/config/__init__.py b/lippytm_chatgpt/config/__init__.py new file mode 100644 index 0000000..91a75f4 --- /dev/null +++ b/lippytm_chatgpt/config/__init__.py @@ -0,0 +1,218 @@ +"""Configuration management for lippytm ChatGPT.AI""" + +import os +import yaml +import json +import logging +from typing import Dict, Any, Optional +from pathlib import Path +from dotenv import load_dotenv + + +class ConfigManager: + """Manages configuration for the ChatGPT AI bot""" + + DEFAULT_CONFIG = { + 'backend': 'openai', # 'openai', 'transformers', 'echo' + 'openai_model': 'gpt-3.5-turbo', + 'local_model': 'microsoft/DialoGPT-medium', + 
'max_tokens': 1000, + 'temperature': 0.7, + 'max_conversation_history': 50, + 'system_prompt': ( + "You are lippytm ChatGPT.AI, an intelligent assistant with time machine capabilities. " + "You can help users with various tasks, provide insightful responses, and engage in " + "meaningful conversations. You have access to knowledge from different time periods " + "and can provide historical context when relevant." + ), + 'logging': { + 'level': 'INFO', + 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + }, + 'ui': { + 'prompt_style': 'modern', + 'show_timestamps': True, + 'colorize_output': True + } + } + + def __init__(self, config_path: Optional[str] = None): + """Initialize configuration manager + + Args: + config_path: Path to configuration file + """ + self.config_path = config_path + self.config = self.DEFAULT_CONFIG.copy() + self.logger = logging.getLogger(__name__) + + # Load environment variables + load_dotenv() + + # Load configuration + self._load_config() + + # Setup logging + self._setup_logging() + + def _load_config(self): + """Load configuration from various sources""" + # 1. Load from file if provided + if self.config_path and os.path.exists(self.config_path): + self._load_config_file(self.config_path) + + # 2. Look for config files in standard locations + else: + config_locations = [ + 'config.yaml', + 'config.yml', + 'lippytm_chatgpt.yaml', + 'lippytm_chatgpt.yml', + os.path.expanduser('~/.lippytm_chatgpt.yaml'), + '/etc/lippytm_chatgpt.yaml' + ] + + for location in config_locations: + if os.path.exists(location): + self._load_config_file(location) + break + + # 3. 
Override with environment variables + self._load_env_config() + + def _load_config_file(self, filepath: str): + """Load configuration from YAML or JSON file""" + try: + with open(filepath, 'r') as f: + if filepath.endswith(('.yaml', '.yml')): + file_config = yaml.safe_load(f) + else: + file_config = json.load(f) + + if file_config: + self._deep_update(self.config, file_config) + self.logger.info(f"Loaded configuration from {filepath}") + + except Exception as e: + self.logger.error(f"Failed to load config from {filepath}: {e}") + + def _load_env_config(self): + """Load configuration from environment variables""" + env_mappings = { + 'LIPPYTM_OPENAI_API_KEY': 'openai_api_key', + 'OPENAI_API_KEY': 'openai_api_key', + 'LIPPYTM_BACKEND': 'backend', + 'LIPPYTM_MODEL': 'openai_model', + 'LIPPYTM_MAX_TOKENS': 'max_tokens', + 'LIPPYTM_TEMPERATURE': 'temperature', + 'LIPPYTM_LOG_LEVEL': 'logging.level' + } + + for env_var, config_key in env_mappings.items(): + value = os.getenv(env_var) + if value: + # Handle nested keys like 'logging.level' + if '.' 
in config_key: + keys = config_key.split('.') + target = self.config + for key in keys[:-1]: + target = target.setdefault(key, {}) + target[keys[-1]] = self._convert_value(value) + else: + self.config[config_key] = self._convert_value(value) + + def _convert_value(self, value: str) -> Any: + """Convert string values to appropriate types""" + # Try to convert to int + try: + return int(value) + except ValueError: + pass + + # Try to convert to float + try: + return float(value) + except ValueError: + pass + + # Try to convert to boolean + if value.lower() in ('true', 'false'): + return value.lower() == 'true' + + # Return as string + return value + + def _deep_update(self, base_dict: Dict, update_dict: Dict): + """Recursively update nested dictionary""" + for key, value in update_dict.items(): + if key in base_dict and isinstance(base_dict[key], dict) and isinstance(value, dict): + self._deep_update(base_dict[key], value) + else: + base_dict[key] = value + + def _setup_logging(self): + """Setup logging configuration""" + log_config = self.config.get('logging', {}) + level = getattr(logging, log_config.get('level', 'INFO').upper()) + format_str = log_config.get('format', '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + + logging.basicConfig(level=level, format=format_str) + + def get(self, key: str, default: Any = None) -> Any: + """Get configuration value""" + keys = key.split('.') + value = self.config + + for k in keys: + if isinstance(value, dict) and k in value: + value = value[k] + else: + return default + + return value + + def set(self, key: str, value: Any): + """Set configuration value""" + keys = key.split('.') + target = self.config + + for k in keys[:-1]: + target = target.setdefault(k, {}) + + target[keys[-1]] = value + + def save_config(self, filepath: str): + """Save current configuration to file""" + try: + with open(filepath, 'w') as f: + yaml.dump(self.config, f, default_flow_style=False, indent=2) + self.logger.info(f"Configuration saved 
"""Core AI bot implementation for lippytm ChatGPT.AI"""

import os
import logging
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from datetime import datetime

# Optional backends: the bot degrades gracefully when neither is installed.
try:
    import openai
    OPENAI_AVAILABLE = True
except ImportError:
    OPENAI_AVAILABLE = False

try:
    from transformers import pipeline
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False


@dataclass
class Message:
    """Represents a conversation message."""
    role: str  # 'user', 'assistant' or 'system'
    content: str
    timestamp: datetime
    metadata: Optional[Dict[str, Any]] = None


class ChatGPTAI:
    """Main ChatGPT AI bot implementation.

    Routes chat turns to one of three backends -- the OpenAI API, a local
    Transformers model, or a deterministic echo fallback -- and keeps a
    bounded in-memory conversation history.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the ChatGPT AI bot.

        Args:
            config: Configuration dictionary with API keys and settings.
        """
        self.config = config
        self.conversation_history: List[Message] = []
        self.logger = logging.getLogger(__name__)

        # Initialize AI backend
        self.ai_backend = self._initialize_backend()

        # Set system prompt
        self.system_prompt = config.get('system_prompt',
            "You are lippytm ChatGPT.AI, an intelligent assistant with time machine capabilities. "
            "You can help users with various tasks and provide insightful responses.")

    def _initialize_backend(self) -> str:
        """Choose a backend from configuration, falling back gracefully.

        Returns:
            One of 'openai', 'transformers' or 'echo'.
        """
        backend_type = self.config.get('backend', 'openai')

        # BUG FIX: an explicitly configured 'echo' backend is now honored.
        # Previously the fallback chain silently upgraded it to
        # 'transformers' whenever that package happened to be installed.
        if backend_type == 'echo':
            self.logger.info("Using echo backend as configured")
            return 'echo'

        if backend_type == 'openai' and OPENAI_AVAILABLE:
            api_key = self.config.get('openai_api_key') or os.getenv('OPENAI_API_KEY')
            if api_key:
                # BUG FIX: requirements pin openai>=1.3.0, where the
                # module-level `openai.api_key` + `ChatCompletion` API was
                # removed; use a client object instead.
                self._openai_client = openai.OpenAI(api_key=api_key)
                self.logger.info("Initialized OpenAI backend")
                return 'openai'
            else:
                self.logger.warning("OpenAI API key not found, falling back to local model")

        if TRANSFORMERS_AVAILABLE:
            self.logger.info("Initialized local Transformers backend")
            self._init_local_model()
            return 'transformers'

        self.logger.warning("No AI backend available, using echo mode")
        return 'echo'

    def _init_local_model(self):
        """Initialize the local transformer model (CPU only).

        On failure the bot drops to echo mode instead of raising.
        """
        try:
            model_name = self.config.get('local_model', 'microsoft/DialoGPT-medium')
            # NOTE(review): the 'conversational' pipeline was deprecated and
            # later removed from transformers -- confirm the installed
            # version still ships it.
            self.local_pipeline = pipeline(
                'conversational',
                model=model_name,
                device=-1  # Use CPU
            )
        except Exception as e:
            self.logger.error(f"Failed to initialize local model: {e}")
            self.ai_backend = 'echo'

    def add_message(self, role: str, content: str, metadata: Optional[Dict] = None):
        """Append a message to history, trimming to the configured maximum."""
        self.conversation_history.append(Message(
            role=role,
            content=content,
            timestamp=datetime.now(),
            metadata=metadata or {}
        ))

        # Keep only the last N messages to bound context length.
        max_history = self.config.get('max_conversation_history', 50)
        if len(self.conversation_history) > max_history:
            self.conversation_history = self.conversation_history[-max_history:]

    def generate_response(self, user_input: str) -> str:
        """Generate, record and return the AI response to ``user_input``.

        Never raises: backend errors are logged and turned into a polite
        apology message so the chat loop keeps running.
        """
        # Record the user turn first so backends see it in history.
        self.add_message('user', user_input)

        try:
            if self.ai_backend == 'openai':
                response = self._generate_openai_response(user_input)
            elif self.ai_backend == 'transformers':
                response = self._generate_transformers_response(user_input)
            else:
                response = self._generate_echo_response(user_input)

            self.add_message('assistant', response)
            return response

        except Exception as e:
            self.logger.error(f"Error generating response: {e}")
            error_response = "I apologize, but I encountered an error while processing your request. Please try again."
            self.add_message('assistant', error_response)
            return error_response

    def _generate_openai_response(self, user_input: str) -> str:
        """Call the OpenAI chat completions API with recent history.

        Raises:
            Exception: re-raises any API error after logging it.
        """
        messages = []

        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})

        # Only the last 10 turns are forwarded to limit token usage.
        for msg in self.conversation_history[-10:]:
            if msg.role in ['user', 'assistant']:
                messages.append({"role": msg.role, "content": msg.content})

        try:
            response = self._openai_client.chat.completions.create(
                model=self.config.get('openai_model', 'gpt-3.5-turbo'),
                messages=messages,
                max_tokens=self.config.get('max_tokens', 1000),
                temperature=self.config.get('temperature', 0.7)
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            self.logger.error(f"OpenAI API error: {e}")
            raise

    def _generate_transformers_response(self, user_input: str) -> str:
        """Generate a reply with the local conversational pipeline."""
        try:
            # The conversational pipeline keeps its own state in a
            # Conversation object, created lazily on first use.
            if not hasattr(self, '_conversation'):
                from transformers import Conversation
                self._conversation = Conversation()

            self._conversation.add_user_input(user_input)
            response = self.local_pipeline(self._conversation)
            return response.generated_responses[-1]
        except Exception as e:
            self.logger.error(f"Transformers error: {e}")
            raise

    def _generate_echo_response(self, user_input: str) -> str:
        """Deterministic canned reply used for testing/fallback."""
        responses = [
            f"I understand you said: '{user_input}'. I'm currently in echo mode.",
            f"Thank you for your message: '{user_input}'. Please configure an AI backend for intelligent responses.",
            f"Message received: '{user_input}'. I'm lippytm ChatGPT.AI in demonstration mode."
        ]
        # Rotate through the canned replies as the conversation grows.
        return responses[len(self.conversation_history) % len(responses)]

    def clear_conversation(self):
        """Clear conversation history and any cached pipeline state."""
        self.conversation_history.clear()
        if hasattr(self, '_conversation'):
            delattr(self, '_conversation')

    def get_conversation_summary(self) -> Dict[str, Any]:
        """Return message counts, backend name and timestamps."""
        return {
            'total_messages': len(self.conversation_history),
            'user_messages': len([m for m in self.conversation_history if m.role == 'user']),
            'assistant_messages': len([m for m in self.conversation_history if m.role == 'assistant']),
            'backend': self.ai_backend,
            'start_time': self.conversation_history[0].timestamp.isoformat() if self.conversation_history else None,
            'last_activity': self.conversation_history[-1].timestamp.isoformat() if self.conversation_history else None
        }
"""Utility functions for lippytm ChatGPT.AI"""

import re
import json
import datetime
from typing import Dict, Any, List, Optional
from dataclasses import asdict, is_dataclass


def format_message(message: str, max_width: int = 80) -> str:
    """Format message with word-based line wrapping.

    Args:
        message: Message to format.
        max_width: Maximum line width.

    Returns:
        The message with lines broken at word boundaries.
    """
    words = message.split()
    lines = []
    current_line = ""

    for word in words:
        if len(current_line + " " + word) > max_width:
            if current_line:
                lines.append(current_line)
                current_line = word
            else:
                # Single word longer than max_width is emitted as-is.
                lines.append(word)
        else:
            current_line = current_line + " " + word if current_line else word

    if current_line:
        lines.append(current_line)

    return "\n".join(lines)


def get_timestamp(format_str: str = "%Y%m%d_%H%M%S") -> str:
    """Return the current local time formatted with ``format_str``."""
    return datetime.datetime.now().strftime(format_str)


def sanitize_filename(filename: str) -> str:
    """Sanitize a filename for safe filesystem usage.

    Replaces characters invalid on common filesystems with underscores,
    strips leading/trailing spaces and periods, and caps length at 255.
    """
    sanitized = re.sub(r'[<>:"/\\|?*]', '_', filename)
    sanitized = sanitized.strip(' .')
    if len(sanitized) > 255:
        sanitized = sanitized[:255]
    return sanitized


def parse_api_response(response: Dict[str, Any]) -> Optional[str]:
    """Extract message content from an API response dictionary.

    Understands the OpenAI ``choices`` layouts and generic
    ``content``/``text`` keys.

    Returns:
        The stripped message content, or None if nothing was found.
    """
    try:
        # OpenAI format
        if 'choices' in response and response['choices']:
            choice = response['choices'][0]
            if 'message' in choice:
                return choice['message'].get('content', '').strip()
            elif 'text' in choice:
                return choice['text'].strip()

        # Generic formats
        if 'content' in response:
            return response['content'].strip()

        if 'text' in response:
            return response['text'].strip()

        return None

    except (KeyError, IndexError, TypeError):
        return None


def estimate_tokens(text: str) -> int:
    """Estimate token count (rough: ~4 characters per token for English)."""
    return len(text) // 4


def truncate_conversation_history(history: List[Dict], max_tokens: int = 4000) -> List[Dict]:
    """Truncate conversation history to fit within a token budget.

    Keeps the most recent messages, dropping older ones first.

    Args:
        history: List of message dictionaries with a 'content' key.
        max_tokens: Maximum estimated token budget.

    Returns:
        The (possibly shortened) history, oldest first.
    """
    if not history:
        return []

    message_tokens = [(msg, estimate_tokens(msg.get('content', ''))) for msg in history]

    # Walk from the newest message backwards until the budget is exhausted.
    total_tokens = 0
    result = []
    for msg, tokens in reversed(message_tokens):
        if total_tokens + tokens > max_tokens:
            break
        total_tokens += tokens
        result.insert(0, msg)

    return result


def validate_api_key(api_key: str) -> bool:
    """Check whether ``api_key`` looks like a plausible API key.

    Accepts OpenAI-style ``sk-...`` keys, or any string of reasonable
    length (10-200 chars) as a generic fallback.
    """
    if not api_key or not isinstance(api_key, str):
        return False

    # OpenAI API key format: sk-...
    if api_key.startswith('sk-') and len(api_key) > 20:
        return True

    return 10 <= len(api_key.strip()) <= 200


def safe_json_loads(json_str: str, default: Any = None) -> Any:
    """Parse a JSON string, returning ``default`` on any parse failure."""
    try:
        return json.loads(json_str)
    except (json.JSONDecodeError, TypeError):
        return default


def safe_json_dumps(obj: Any, default: str = "{}") -> str:
    """Serialize ``obj`` to pretty JSON, returning ``default`` on failure."""
    try:
        return json.dumps(obj, indent=2, ensure_ascii=False)
    except (TypeError, ValueError):
        return default


def extract_code_blocks(text: str) -> List[Dict[str, str]]:
    """Extract fenced code blocks from markdown-formatted text.

    Returns:
        A list of dicts with 'language' (defaults to 'text') and 'code'.
    """
    pattern = r'```(\w+)?\n(.*?)\n```'
    matches = re.findall(pattern, text, re.DOTALL)
    return [{'language': language or 'text', 'code': code.strip()}
            for language, code in matches]


def mask_sensitive_data(text: str) -> str:
    """Mask API keys, emails and token-like strings for safe logging."""
    # Mask API keys first so they are not caught by the generic token rule.
    text = re.sub(r'sk-[a-zA-Z0-9]{20,}', 'sk-***MASKED***', text)

    # BUG FIX: the TLD class was [A-Z|a-z], which matches a literal '|'
    # inside a character class instead of expressing alternation.
    text = re.sub(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', '***EMAIL***', text)

    # Mask potential passwords/tokens (runs of 16+ alphanumeric chars).
    text = re.sub(r'\b[A-Za-z0-9]{16,}\b', '***TOKEN***', text)

    return text


class ConversationExporter:
    """Export conversations to various formats."""

    @staticmethod
    def to_json(conversation_history: List, metadata: Optional[Dict] = None) -> str:
        """Export a conversation to a pretty-printed JSON string."""
        export_data = {
            'metadata': metadata or {},
            'export_timestamp': datetime.datetime.now().isoformat(),
            'conversation': []
        }

        for msg in conversation_history:
            # BUG FIX: guard with is_dataclass -- asdict() raises TypeError
            # for arbitrary objects that merely have a __dict__.
            if is_dataclass(msg):
                msg_dict = asdict(msg)
                # datetime objects are not JSON-serializable; stringify them.
                if 'timestamp' in msg_dict and hasattr(msg_dict['timestamp'], 'isoformat'):
                    msg_dict['timestamp'] = msg_dict['timestamp'].isoformat()
                export_data['conversation'].append(msg_dict)
            else:
                export_data['conversation'].append(msg)

        return json.dumps(export_data, indent=2, ensure_ascii=False)

    @staticmethod
    def to_markdown(conversation_history: List, metadata: Optional[Dict] = None) -> str:
        """Export a conversation to a Markdown document."""
        lines = ["# lippytm ChatGPT.AI Conversation Log", ""]

        if metadata:
            lines.extend(["## Metadata", ""])
            for key, value in metadata.items():
                lines.append(f"- **{key}**: {value}")
            lines.append("")

        lines.extend(["## Conversation", ""])

        for msg in conversation_history:
            # Only Message-like objects are rendered; raw dicts are skipped.
            if hasattr(msg, 'role') and hasattr(msg, 'content'):
                role = msg.role.title()
                content = msg.content
                timestamp = ""

                if hasattr(msg, 'timestamp'):
                    timestamp = f" _{msg.timestamp.strftime('%H:%M:%S')}_"

                lines.append(f"### {role}{timestamp}")
                lines.append("")
                lines.append(content)
                lines.append("")

        return "\n".join(lines)
#!/usr/bin/env python3
"""Setup script for lippytm ChatGPT.AI"""


def parse_requirements(lines):
    """Return requirement specifiers from requirements-file lines.

    Blank lines and comment lines are skipped.  BUG FIX: the comment check
    now runs on the *stripped* line; previously ``line.startswith("#")``
    was tested before stripping, so indented comments leaked into
    ``install_requires``.
    """
    specs = []
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith("#"):
            specs.append(stripped)
    return specs


if __name__ == "__main__":
    # BUG FIX: file reads and the setup() call used to run at module import
    # time, so importing setup.py (e.g. from tooling or tests) crashed with
    # FileNotFoundError outside a checkout.  pip executes setup.py as
    # __main__, so guarding preserves install behavior.
    from setuptools import setup, find_packages

    with open("README.md", "r", encoding="utf-8") as fh:
        long_description = fh.read()

    with open("requirements.txt", "r", encoding="utf-8") as fh:
        requirements = parse_requirements(fh)

    setup(
        name="lippytm-chatgpt-ai",
        version="1.0.0",
        author="lippytm",
        description="A ChatGPT-like AI bot with time machine capabilities",
        long_description=long_description,
        long_description_content_type="text/markdown",
        packages=find_packages(),
        classifiers=[
            "Development Status :: 4 - Beta",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
            "Operating System :: OS Independent",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Programming Language :: Python :: 3.10",
            "Programming Language :: Python :: 3.11",
            "Programming Language :: Python :: 3.12",
        ],
        python_requires=">=3.8",
        install_requires=requirements,
        entry_points={
            "console_scripts": [
                "lippytm-chatgpt=lippytm_chatgpt.cli:main",
            ],
        },
    )
#!/usr/bin/env bash
# Quick setup script for lippytm ChatGPT.AI
#
# Installs dependencies, seeds config files from their examples, marks the
# launcher scripts executable and runs the installation smoke test.
# BUG FIX: the script runs under `set -e`, so every optional step (copying
# example configs, chmod on launcher scripts) is now guarded with a file
# check -- previously a missing config.example.yaml, .env.example,
# lippytm-chatgpt or demo.py aborted the whole setup.

set -e

echo "🤖 Setting up lippytm ChatGPT.AI..."
echo ""

# Check Python version
echo "Checking Python version..."
python3 --version
echo ""

# Install dependencies
echo "Installing Python dependencies..."
pip install -r requirements.txt --quiet
echo "✓ Dependencies installed"
echo ""

# Copy example config files (only when the example exists and the real
# config does not, so user edits are never clobbered).
if [ ! -f "config.yaml" ] && [ -f "config.example.yaml" ]; then
    cp config.example.yaml config.yaml
    echo "✓ Created config.yaml from example"
fi

if [ ! -f ".env" ] && [ -f ".env.example" ]; then
    cp .env.example .env
    echo "✓ Created .env from example"
fi

# Make scripts executable (skip silently if a script is absent).
if [ -f "lippytm-chatgpt" ]; then
    chmod +x lippytm-chatgpt
fi
if [ -f "demo.py" ]; then
    chmod +x demo.py
fi
echo "✓ Made scripts executable"
echo ""

# Test installation
echo "Testing installation..."
python3 test_installation.py
echo ""

echo "🎉 Setup complete!"
echo ""
echo "Next steps:"
echo "1. Edit config.yaml or .env with your API keys"
echo "2. Run: ./lippytm-chatgpt --help"
echo "3. Try: ./demo.py"
echo "4. Start chatting: ./lippytm-chatgpt --backend echo"
echo ""
echo "For OpenAI backend, set your API key:"
echo " export OPENAI_API_KEY='your-api-key-here'"
echo " ./lippytm-chatgpt --backend openai"
#!/usr/bin/env python3
"""Quick test script for lippytm ChatGPT.AI"""

import sys
import os


def main():
    """Run the installation smoke test.

    Imports the package, builds a bot, generates one response and prints a
    summary.  Exits with status 1 on any failure.
    """
    # Add the project root to Python path so a source checkout is importable.
    project_root = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, project_root)

    try:
        from lippytm_chatgpt.config import ConfigManager
        from lippytm_chatgpt.core import ChatGPTAI

        print("✓ lippytm ChatGPT.AI modules imported successfully")

        # Test configuration
        config_manager = ConfigManager()
        print("✓ Configuration manager initialized")

        # Test AI bot initialization
        ai_bot = ChatGPTAI(config_manager.get_config_dict())
        print(f"✓ AI bot initialized with backend: {ai_bot.ai_backend}")

        # Test response generation
        test_message = "Hello, this is a test message."
        response = ai_bot.generate_response(test_message)
        print(f"✓ Response generated: {response[:50]}...")

        # Test conversation summary
        summary = ai_bot.get_conversation_summary()
        print(f"✓ Conversation summary: {summary['total_messages']} messages")

        print("\n🎉 All tests passed! lippytm ChatGPT.AI is working correctly.")

    except ImportError as e:
        print(f"✗ Import error: {e}")
        print("Please install dependencies: pip install -r requirements.txt")
        sys.exit(1)

    except Exception as e:
        print(f"✗ Test failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    # BUG FIX: the smoke test used to run at module import time and call
    # sys.exit(1), so merely importing this file killed the interpreter.
    main()