This repository was archived by the owner on Oct 21, 2025. It is now read-only.
Reproducible outputs #10
Merged
src/utils/llm_backend.py

@@ -10,8 +10,9 @@
 class LLMBackend(ABC):
     """Abstract base class for LLM backends."""

-    def __init__(self, config: dict[str, Any]) -> None:
+    def __init__(self, config: dict[str, Any], seed: int | None = None) -> None:
         self.config = config
+        self.seed = seed

     @abstractmethod
     def generate(
@@ -62,15 +63,16 @@ def test_connection(self) -> bool:
 class OllamaBackend(LLMBackend):
     """Ollama backend implementation."""

-    def __init__(self, config: dict[str, Any]) -> None:
-        super().__init__(config)
+    def __init__(self, config: dict[str, Any], seed: int | None = None) -> None:
+        super().__init__(config, seed)
         # Import here to avoid circular imports
         from src.utils.model_client import OllamaClient

         self.client = OllamaClient(
             host=config.get("host", "localhost"),
             port=config.get("port", 11434),
             model=config.get("model", "gpt-oss:20b"),
+            seed=seed,
         )

     def generate(
@@ -82,6 +84,10 @@
         stream: bool = False,
     ) -> ModelResponse:
         """Generate response from Ollama model."""
+        # For reproducibility, use temperature=0 when seed is set
+        if self.seed is not None:
+            temperature = 0.0
+
         return self.client.generate(
             prompt=prompt,
             system_prompt=system_prompt,
@@ -97,6 +103,10 @@ def chat(
         max_tokens: int | None = None,
     ) -> ModelResponse:
         """Multi-turn chat conversation with Ollama."""
+        # For reproducibility, use temperature=0 when seed is set
+        if self.seed is not None:
+            temperature = 0.0
+
         return self.client.chat(
             messages=messages,
             temperature=temperature,
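The same guard recurs in every generate/chat path of both backends: when a seed is configured, the caller-supplied temperature is replaced with 0.0. As a minimal sketch of that rule in isolation (the helper name is illustrative, not part of this change):

    def effective_temperature(requested: float, seed: int | None) -> float:
        # Mirrors the guard above: a configured seed pins sampling temperature to 0.
        return 0.0 if seed is not None else requested

    assert effective_temperature(0.7, seed=42) == 0.0
    assert effective_temperature(0.7, seed=None) == 0.7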
@@ -127,8 +137,8 @@ def pull_model(self) -> bool:
 class OpenRouterBackend(LLMBackend):
     """OpenRouter backend implementation."""

-    def __init__(self, config: dict[str, Any]) -> None:
-        super().__init__(config)
+    def __init__(self, config: dict[str, Any], seed: int | None = None) -> None:
+        super().__init__(config, seed)
         import logging

         import openai
@@ -163,6 +173,10 @@ def generate(
         stream: bool = False,
     ) -> ModelResponse:
         """Generate response from OpenRouter model."""
+        # For reproducibility, use temperature=0 when seed is set
+        if self.seed is not None:
+            temperature = 0.0
+
         start_time = time.time()

         messages = []
@@ -171,15 +185,23 @@ def generate(
         messages.append({"role": "user", "content": prompt})

         try:
-            response = self.client.chat.completions.create(
-                model=self.model,
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                stream=stream,
-                timeout=self.timeout,
-                extra_headers=self._get_headers(),
-            )
+            # Build request parameters
+            request_params = {
+                "model": self.model,
+                "messages": messages,
+                "temperature": temperature,
+                "stream": stream,
+                "timeout": self.timeout,
+                "extra_headers": self._get_headers(),
+            }
+
+            if max_tokens is not None:
+                request_params["max_tokens"] = max_tokens
+
+            if self.seed is not None:
+                request_params["seed"] = self.seed
+
+            response = self.client.chat.completions.create(**request_params)

             response_time = time.time() - start_time
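OpenRouter exposes an OpenAI-compatible API, and recent versions of the openai Python SDK accept a seed argument on chat.completions.create, so the dict built above simply forwards it when present. A standalone sketch of the equivalent request (base URL, API key and model name are placeholders, not taken from this repo):

    from openai import OpenAI

    client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key="sk-or-...")
    response = client.chat.completions.create(
        model="some/model",                                    # placeholder
        messages=[{"role": "user", "content": "Say hi"}],
        temperature=0.0,  # forced to 0.0 when a seed is configured
        seed=42,          # best-effort determinism; providers may not honour it
    )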
@@ -220,17 +242,29 @@ def chat(
         max_tokens: int | None = None,
     ) -> ModelResponse:
         """Multi-turn chat conversation with OpenRouter."""
+        # For reproducibility, use temperature=0 when seed is set
+        if self.seed is not None:
+            temperature = 0.0
+
         start_time = time.time()

         try:
-            response = self.client.chat.completions.create(
-                model=self.model,
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                timeout=self.timeout,
-                extra_headers=self._get_headers(),
-            )
+            # Build request parameters
+            request_params = {
+                "model": self.model,
+                "messages": messages,
+                "temperature": temperature,
+                "timeout": self.timeout,
+                "extra_headers": self._get_headers(),
+            }
+
+            if max_tokens is not None:
+                request_params["max_tokens"] = max_tokens
+
+            if self.seed is not None:
+                request_params["seed"] = self.seed
+
+            response = self.client.chat.completions.create(**request_params)

             response_time = time.time() - start_time
@@ -290,16 +324,16 @@ def list_models(self) -> list[str]:
         return []


-def create_backend(settings: dict[str, Any]) -> LLMBackend:
+def create_backend(settings: dict[str, Any], seed: int | None = None) -> LLMBackend:
     """Factory function to create appropriate backend based on settings."""
     backend_config = settings.get("backend", {})
     provider = backend_config.get("provider", "ollama")

     if provider == "ollama":
         ollama_config = settings.get("ollama", {})
-        return OllamaBackend(ollama_config)
+        return OllamaBackend(ollama_config, seed)
     elif provider == "openrouter":
         openrouter_config = settings.get("openrouter", {})
-        return OpenRouterBackend(openrouter_config)
+        return OpenRouterBackend(openrouter_config, seed)
     else:
         raise ValueError(f"Unsupported backend provider: {provider}")
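A minimal usage sketch of the seeded factory; the settings dict below only echoes the keys this file reads (in the real app they come from settings_manager), and the seed value is arbitrary:

    from src.utils.llm_backend import create_backend

    settings = {
        "backend": {"provider": "ollama"},
        "ollama": {"host": "localhost", "port": 11434, "model": "gpt-oss:20b"},
    }

    # Same settings + same seed -> same request options on every run.
    backend = create_backend(settings, seed=42)
    # reply = backend.generate(prompt="2+2?")  # temperature is forced to 0.0 here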
src/utils/model_client.py

@@ -2,34 +2,42 @@
 import subprocess
 import time
-from typing import Any
+from typing import TYPE_CHECKING, Any, Union

 import requests
 from requests.exceptions import Timeout
 from src.models import ModelResponse, OllamaStatus
-from src.utils.llm_backend import create_backend
 from src.utils.settings_manager import settings_manager

+if TYPE_CHECKING:
+    from src.utils.llm_backend import LLMBackend
+

-def get_client() -> Any:
+def get_client(seed: int | None = None) -> Union["LLMBackend", "OllamaClient"]:
     """Factory function to get the configured LLM client."""
+    from src.utils.llm_backend import create_backend
+
     try:
         settings = settings_manager.load_settings()
-        return create_backend(settings)
+        return create_backend(settings, seed)
     except Exception:
         # Fallback to default Ollama configuration for backward compatibility
-        return OllamaClient()
+        return OllamaClient(seed=seed)


 class OllamaClient:
     """Client for interacting with Ollama-hosted GPT-OSS-20B"""

     def __init__(
-        self, host: str = "localhost", port: int = 11434, model: str = "gpt-oss:20b"
+        self,
+        host: str = "localhost",
+        port: int = 11434,
+        model: str = "gpt-oss:20b",
+        seed: int | None = None,
     ) -> None:
         self.base_url = f"http://{host}:{port}"
         self.model = model
         self.session = requests.Session()
+        self.seed = seed

     def _make_request(
         self, endpoint: str, data: dict[str, Any] | None = None, method: str = "POST"
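The circular dependency between model_client and llm_backend is handled with the usual pattern: the class is imported only under TYPE_CHECKING so it is available for the return annotation, while the runtime import happens lazily inside get_client. A generic sketch of that pattern with made-up names:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen only by type checkers; never executed at runtime.
        from mypackage.backend import Backend  # hypothetical module

    def build_backend() -> "Backend":
        # Deferred import breaks the import cycle at module load time.
        from mypackage.backend import Backend
        return Backend()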
@@ -38,9 +46,9 @@ def _make_request(
         url = f"{self.base_url}/{endpoint}"
         try:
             if method.upper() == "GET":
-                response = self.session.get(url, timeout=180)
+                response = requests.get(url, timeout=180)
             else:
-                response = self.session.post(url, json=data, timeout=180)
+                response = requests.post(url, json=data, timeout=180)
             response.raise_for_status()
             return response.json()
         except requests.RequestException as e:
@@ -202,6 +210,10 @@ def generate(
         stream: bool = False,
     ) -> ModelResponse:
         """Generate response from model"""
+        # For reproducibility, use temperature=0 when seed is set
+        if self.seed is not None:
+            temperature = 0.0
+
         start_time = time.time()

         data = {
@@ -219,6 +231,9 @@ def generate(
         if max_tokens:
             data["options"]["num_predict"] = max_tokens

+        if self.seed is not None:
+            data["options"]["seed"] = self.seed
+
         try:
             response = self._make_request("api/generate", data)
             response_time = time.time() - start_time
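On the wire, the change amounts to an options block that carries both the pinned temperature and the seed. A hedged sketch of the resulting /api/generate payload (values are illustrative; only options.temperature and options.seed matter here):

    import requests

    payload = {
        "model": "gpt-oss:20b",
        "prompt": "2+2?",
        "stream": False,
        "options": {
            "temperature": 0.0,  # pinned because a seed is configured
            "seed": 42,          # Ollama uses this to make sampling repeatable
        },
    }
    # response = requests.post("http://localhost:11434/api/generate", json=payload, timeout=180)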
@@ -251,6 +266,10 @@ def chat(
         max_tokens: int | None = None,
     ) -> ModelResponse:
         """Multi-turn chat conversation"""
+        # For reproducibility, use temperature=0 when seed is set
+        if self.seed is not None:
+            temperature = 0.0
+
         start_time = time.time()

         data = {
@@ -265,6 +284,9 @@ def chat(
         if max_tokens:
             data["options"]["num_predict"] = max_tokens

+        if self.seed is not None:
+            data["options"]["seed"] = self.seed
+
         try:
             response = self._make_request("api/chat", data)
             response_time = time.time() - start_time
@@ -295,6 +317,18 @@ def get_backend_type(self) -> str:
         """Get the backend type identifier (for compatibility)."""
         return "Ollama"

+    def get_model_name(self) -> str:
+        """Get the model name (for compatibility)."""
+        return self.model
+
+    def is_available(self) -> bool:
+        """Check if model is available (for compatibility)."""
+        return self.is_model_available()
+
+    def check_status(self) -> OllamaStatus:
+        """Check Ollama status (for compatibility)."""
+        return self.check_ollama_status()
+

 def test_connection() -> bool | None:
     """Test Ollama connection and model availability"""
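These shims give OllamaClient the same surface as the LLMBackend implementations, so callers of get_client do not need to care which one they got back. A hedged usage sketch (it assumes, as the "for compatibility" docstrings suggest, that the backends expose the same accessors):

    from src.utils.model_client import get_client

    client = get_client(seed=123)  # LLMBackend subclass or OllamaClient fallback
    print(client.get_backend_type(), client.get_model_name())
    if client.is_available():
        print("model ready")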