Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
b49266b
base threading and AI request structure for future addons (highlighti…
LSchaffner Jan 15, 2026
4415b66
Threading handler and airequester now in current app
LSchaffner Jan 16, 2026
df7c5f2
config MAX_CONCURRENT_AI_REQUESTS works now
LSchaffner Jan 16, 2026
309a16c
fix for github test (config.dist)
LSchaffner Jan 16, 2026
aab9086
configs for ai and threading now under configurations
LSchaffner Jan 17, 2026
1a64f17
fix: feature flagged threading
LSchaffner Jan 25, 2026
eca8af4
Merge branch 'master' into wip/ls/threading_core_with_ai
LSchaffner Jan 25, 2026
a42dcf9
updated gitignore for all configs
LSchaffner Jan 25, 2026
c93cf24
Missed updating config.dist after creating dedicated configs
LSchaffner Jan 25, 2026
01eb623
Fix (Feature AI and Threading need to be off for git-test to work)
LSchaffner Jan 25, 2026
47f48ce
Feature flag for threading removed
LSchaffner Jan 27, 2026
71dda95
Standard Model name changeable through query, ai api method with more…
LSchaffner Feb 2, 2026
7514196
dispatcher update to maximize throughput
LSchaffner Feb 2, 2026
11df5f8
First tests for threading_core.py and small changes to pass tests
LSchaffner Feb 2, 2026
f548257
Merge branch 'master' into wip/ls/threading_core_with_ai
LSchaffner Mar 9, 2026
1cdc169
updated test cases in threading tests, to make them faster
LSchaffner Mar 9, 2026
4153285
highlighted desc now in a separate Thread and using threading core
LSchaffner Mar 9, 2026
5a263dc
Fix: Added missing Event parameter to test case.
LSchaffner Mar 9, 2026
baae224
Fix: a missing function call
LSchaffner Mar 10, 2026
012fe64
feat: support multiple providers and api methods with availability check
LSchaffner Mar 15, 2026
d0a47c5
Different, configurable numbers of simultaneous connections for diffe…
LSchaffner Mar 17, 2026
687d3bf
fix: failed tests (missing config after test_ai_provider)
LSchaffner Mar 17, 2026
094ad3f
fix: missing configuration copy
LSchaffner Mar 17, 2026
721ecf8
fix: test with patch for the config, to get it into threading_core
LSchaffner Mar 17, 2026
1247d8e
add per-provider semaphores to limit concurrent AI API requests
LSchaffner Mar 18, 2026
2613eba
refactor: replace print with logging and tighten docstrings
LSchaffner Mar 18, 2026
a1d0505
Merge branch 'master' into wip/ls/threading_core_with_ai
LSchaffner Mar 18, 2026
8240628
refactor: replace print with logging
LSchaffner Mar 18, 2026
7392c9d
added small tests for the ai_core_requests module
LSchaffner Mar 18, 2026
07a6a26
Merge branch 'master' into wip/ls/threading_core_with_ai
LSchaffner Mar 19, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,10 @@ jobs:
- name: Install pytest
run: pip install pytest pytest-cov coverage pytest-md pytest-emoji
- name: Set up configuration
run: cp ./hanfor/config.dist.py ./hanfor/config.py
run: |
cp ./hanfor/config.dist.py ./hanfor/config.py
cp ./hanfor/configuration/threading_config.dist.py ./hanfor/configuration/threading_config.py
cp ./hanfor/configuration/ai_config.dist.py ./hanfor/configuration/ai_config.py
- name: Install Z3
run: pysmt-install --z3 --confirm-agreement
- name: Run pytest
Expand Down
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
**/stored_sessions
config.py
hanfor/data/**
hanfor/configuration/ultimate_config.py
hanfor/configuration/*_config.py

**/__pycache__*
**/.webassets-cache/
Expand Down
Empty file added hanfor/ai_request/__init__.py
Empty file.
24 changes: 24 additions & 0 deletions hanfor/ai_request/ai_api_methods_abstract_class.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import threading
from abc import ABC, abstractmethod
from typing import Optional


class AiApiMethod(ABC):
@property
@abstractmethod
def provider_names_which_work_with_api_method(self) -> list[str]:
"""All names of the AI models that work with this API method must be entered in a list here"""
pass

@abstractmethod
def query_api(
self,
query: str,
url: str,
api_key: str,
model_name: str,
other_params: Optional[dict],
stop_event: Optional[threading.Event],
) -> tuple[str | None, str]:
"""Sends a query to the AI API, returns (response, status)."""
pass
280 changes: 280 additions & 0 deletions hanfor/ai_request/ai_core_requests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,280 @@
from threading import Event, Semaphore
from dataclasses import dataclass, field
from typing import Optional, Callable
from ai_request.ai_api_methods_abstract_class import AiApiMethod
from configuration import ai_config

import os
import importlib
import logging
import requests

from thread_handling.threading_core import ThreadHandler, ThreadTask, SchedulingClass, ThreadGroup, TaskResult


@dataclass(slots=True)
class ProviderEntry:
    """Data structure for provider information"""

    # Upper bound on simultaneous API requests to this provider; becomes the
    # initial value of `semaphore` below.
    maximum_concurrent_api_requests: int
    # Base URL of the provider's API endpoint.
    url: str
    # Credential sent to the provider; some API methods may not use it.
    api_key: str
    # Mapping of model name -> human-readable model description.
    models: dict[str, str]
    # Model used when a caller does not request a specific one.
    default_model: str
    # API method instances registered for this provider, keyed by method module name.
    api_methods: dict[str, AiApiMethod] = field(default_factory=dict)
    # Derived state, not a constructor argument; limits concurrent requests.
    # NOTE(review): a configured value of 0 yields Semaphore(0), which never
    # grants a permit — confirm configs always use a positive value.
    semaphore: Semaphore = field(init=False)

    def __post_init__(self):
        # Built here because the semaphore is derived from the configured limit.
        self.semaphore = Semaphore(self.maximum_concurrent_api_requests)


class AiCatalogPrinter:
    """Prints AI provider catalog and model check results.

    Each report is assembled into a single string and emitted with one
    ``logging.info`` call so it appears as one uninterrupted block in the log.
    """

    def __init__(self, catalog: dict[str, ProviderEntry]):
        # The catalog is shared with AiRequest; this class only reads it.
        self.__catalog = catalog

    def print_catalog(self):
        """Logs all providers with their configuration."""
        lines = ["\n" + "=" * 40]
        for provider, entry in self.__catalog.items():
            lines.append("-" * 40)
            lines.append(f"Provider: {provider}")
            lines.append(f"  max_request {entry.maximum_concurrent_api_requests}")
            lines.append(f"  Semaphore: {entry.semaphore}")
            lines.append(f"  URL: {entry.url}")
            # Only a key prefix is logged so the full credential stays out of the logs.
            lines.append(f"  API Key: {entry.api_key[:8]}...")
            lines.append(f"  API Methods: {', '.join(entry.api_methods.keys()) if entry.api_methods else 'None'}")
            # Fix: plain string literal (was an f-string without placeholders).
            lines.append("  Models:")
            for model, description in entry.models.items():
                lines.append(f"    - {model}: {description}")
            lines.append(f"  Default Model: {entry.default_model}")
        lines.append("-" * 40)
        lines.append(f"  Default Provider: {ai_config.DEFAULT_PROVIDER}")
        lines.append("=" * 40)
        logging.info("\n".join(lines))

    @staticmethod
    def print_check_results(results: dict[str, dict[str, dict[str, str]]]):
        """Logs model check results grouped by provider and API method."""
        lines = []
        for provider, methods in results.items():
            lines.append("\n" + "=" * 40)
            lines.append(f"Provider: {provider}")
            for method_name, models in methods.items():
                lines.append(f"  API Method: {method_name}")
                for model, status in models.items():
                    # Anything other than the literal status "ok" counts as a failure.
                    icon = "✓" if status == "ok" else "✗"
                    lines.append(f"    {icon} {model}: {status}")
            lines.append("=" * 40)
        logging.info("\n".join(lines))


class AiCatalogTester:
    """Tests all models of all providers against their registered API methods."""

    def __init__(self, catalog: dict[str, ProviderEntry]):
        self.__catalog = catalog

    def check_all_models(self, stop_event: Event) -> dict[str, dict[str, dict[str, str]]]:
        """Probe every model of every provider with each registered API method.

        Returns a nested mapping ``{provider: {method: {model: status}}}`` where
        a status is ``"ok"`` or an ``"error: ..."`` string. Stops early (with
        partial results) once *stop_event* is set.
        """
        outcome: dict[str, dict[str, dict[str, str]]] = {}
        for provider_name, entry in self.__catalog.items():
            if stop_event.is_set():
                break
            outcome[provider_name] = {}
            # Unreachable endpoints are recorded inside `outcome` by the helper.
            if not self.__is_reachable(provider_name, entry, outcome):
                continue
            if not entry.api_methods:
                outcome[provider_name]["__no_api_method__"] = {"__no_model__": "error: no api method available"}
                continue
            self.__probe_provider(entry, outcome[provider_name], stop_event)
        return outcome

    @staticmethod
    def __probe_provider(entry: ProviderEntry, provider_results: dict[str, dict[str, str]], stop_event: Event):
        """Run the test prompt through every (method, model) pair of one provider."""
        for method_name, method in entry.api_methods.items():
            if stop_event.is_set():
                return
            method_results: dict[str, str] = {}
            provider_results[method_name] = method_results
            for model_name in entry.models:
                if stop_event.is_set():
                    return
                try:
                    reply, status = method.query_api("Say 'ok'.", entry.url, entry.api_key, model_name, None, None)
                    method_results[model_name] = "ok" if reply else f"error: {status}"
                except Exception as exc:
                    # Record the failure per-model instead of aborting the whole sweep.
                    method_results[model_name] = f"error: {exc}"

    @staticmethod
    def __is_reachable(provider_name: str, entry: ProviderEntry, results: dict) -> bool:
        """Returns True if the provider URL is reachable, otherwise writes the error into results."""
        try:
            requests.get(entry.url, timeout=3)
        except requests.exceptions.ConnectionError:
            results[provider_name]["__unreachable__"] = {
                "__no_model__": f"error: '{provider_name}' is not reachable at {entry.url}"
            }
            return False
        except requests.exceptions.Timeout:
            results[provider_name]["__unreachable__"] = {
                "__no_model__": f"error: '{provider_name}' timed out at {entry.url}"
            }
            return False
        return True


class AiRequest:
    """Loads and organizes AI providers and routes incoming requests to the correct API method.

    On construction the provider catalog is built from ai_config, API-method
    implementations are discovered dynamically and attached to their providers,
    and a background task is submitted that probes every model for availability.
    """

    def __init__(self, thread_handler: ThreadHandler):
        self.__thread_handler = thread_handler
        self.__ai_model_catalog = self.__build_catalog()
        self.__register_api_methods()
        self.__validate_catalog()
        self.__catalog_printer = AiCatalogPrinter(self.__ai_model_catalog)
        self.__catalog_tester = AiCatalogTester(self.__ai_model_catalog)

        self.print_catalog()
        # Background availability sweep; results are delivered to
        # print_check_results via the task callback.
        # NOTE(review): check_all_models expects a stop_event that is absent from
        # the args tuple — presumably injected by the thread handler; confirm
        # against threading_core.ThreadTask.
        self.__thread_handler.submit(
            ThreadTask(
                self.check_all_models,
                SchedulingClass.SYSTEM_CALL,
                ThreadGroup.AI,
                None,
                self.print_check_results,
                (),
                {},
            )
        )

    def ask_ai(
        self,
        prompt: str,
        callback: Callable,
        scheduling_class: SchedulingClass = SchedulingClass.SYSTEM_CALL,
        provider: Optional[str] = None,
        model_name: Optional[str] = None,
        api_method_name: Optional[str] = None,
        other_params: Optional[dict] = None,
    ) -> TaskResult:
        """
        Submits an AI query asynchronously. Returns a TaskResult to poll via .done() or block via .result().
        Result is also delivered to the callback.

        Unknown provider/model/method names fall back to the configured
        defaults (with a warning) instead of failing the request.
        """

        provider = self._resolve_provider(provider)
        provider_entry = self.__ai_model_catalog[provider]
        model_name = self._resolve_model(provider_entry, model_name)
        method = self._resolve_method(provider_entry, api_method_name)
        # Per-provider semaphore caps concurrent requests against one provider.
        semaphore = provider_entry.semaphore

        # NOTE(review): query_api's trailing stop_event is not in this args
        # tuple — presumably appended by the thread handler; confirm.
        ai_task = ThreadTask(
            method.query_api,
            scheduling_class,
            ThreadGroup.AI,
            semaphore,
            callback,
            (
                prompt,
                provider_entry.url,
                provider_entry.api_key,
                model_name,
                other_params,
            ),
            {},
        )
        return self.__thread_handler.submit(ai_task)

    def get_all_models(self) -> dict[str, ProviderEntry]:
        """Returns a shallow copy of the provider catalog."""
        return dict(self.__ai_model_catalog)

    def check_all_models(self, stop_event: Event) -> dict[str, dict[str, dict[str, str]]]:
        """Probes every provider/method/model combination; see AiCatalogTester."""
        return self.__catalog_tester.check_all_models(stop_event)

    def print_catalog(self) -> None:
        """Logs the full provider catalog."""
        self.__catalog_printer.print_catalog()

    def print_check_results(self, tested_catalog: dict[str, dict[str, dict[str, str]]]):
        """Logs the results of a model availability check."""
        self.__catalog_printer.print_check_results(tested_catalog)

    def _resolve_provider(self, provider: Optional[str]) -> str:
        """Returns a valid provider name, falling back to the configured default.

        Raises ValueError if even the default provider is missing from the catalog.
        """
        if not provider or provider not in self.__ai_model_catalog:
            if provider:
                logging.warning(f"Provider: {provider} not found, will use: {ai_config.DEFAULT_PROVIDER}")
            provider = ai_config.DEFAULT_PROVIDER
            if provider not in self.__ai_model_catalog:
                raise ValueError(f"Default provider '{ai_config.DEFAULT_PROVIDER}' not found in catalog.")
        return provider

    @staticmethod
    def _resolve_model(provider_entry: ProviderEntry, model_name: Optional[str]) -> str:
        """Returns a valid model name for the provider, falling back to its default.

        Raises ValueError if even the default model is missing from the catalog.
        """
        if not model_name or model_name not in provider_entry.models:
            if model_name:
                logging.warning(f"Model: {model_name} not found, will use: {provider_entry.default_model}")
            model_name = provider_entry.default_model
            if model_name not in provider_entry.models:
                raise ValueError(f"Default model '{provider_entry.default_model}' not found in catalog.")
        return model_name

    @staticmethod
    def _resolve_method(provider_entry: ProviderEntry, api_method_name: Optional[str]) -> AiApiMethod:
        """Returns a usable API method; falls back to the first registered one.

        Raises ValueError if the provider has no API methods at all.
        """
        if not api_method_name or api_method_name not in provider_entry.api_methods:
            if api_method_name:
                logging.warning(f"API method: {api_method_name} not found, will use first available.")
            method = next(iter(provider_entry.api_methods.values()), None)
            if not method:
                # Fix: removed stray f-prefix from a literal without placeholders.
                raise ValueError("No api method available. Check your configuration.")
        else:
            method = provider_entry.api_methods[api_method_name]
        return method

    @staticmethod
    def __build_catalog() -> dict[str, ProviderEntry]:
        """Builds the provider catalog from ai_config."""
        return {provider: ProviderEntry(**data) for provider, data in ai_config.AI_PROVIDERS.items()}

    @staticmethod
    def __load_ai_api_methods() -> list[tuple[str, AiApiMethod]]:
        """Dynamically loads all AI API method classes from the api_request_methods folder.

        Returns (module_name, instance) pairs for every concrete AiApiMethod
        subclass that could be instantiated.
        """
        directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "api_request_methods/")
        methods = []
        base_package = "ai_request.api_request_methods"

        for filename in os.listdir(directory):
            if filename.endswith(".py") and filename != "__init__.py":
                module_name = filename[:-3]
                module_path = f"{base_package}.{module_name}"

                try:
                    module = importlib.import_module(module_path)
                    for attr_name in dir(module):
                        attr = getattr(module, attr_name)
                        # Only concrete AiApiMethod subclasses; skip the ABC itself.
                        if isinstance(attr, type) and issubclass(attr, AiApiMethod) and attr is not AiApiMethod:
                            try:
                                instance = attr()
                            except TypeError as e:
                                # e.g. a still-abstract subclass or a ctor with required args.
                                logging.warning(f"Cannot instantiate {attr_name} in {module_path}: {e}")
                            else:
                                methods.append((module_name, instance))
                except ModuleNotFoundError as e:
                    logging.error(f"Error loading module {module_path}: {e}")
        return methods

    def __register_api_methods(self):
        """Assigns loaded API methods to matching providers in the catalog, keyed by module name."""
        for name, method in self.__load_ai_api_methods():
            for provider in method.provider_names_which_work_with_api_method:
                if provider not in self.__ai_model_catalog:
                    logging.warning(f"Provider '{provider}' from method '{name}' not found in catalog, skipping.")
                    continue
                self.__ai_model_catalog[provider].api_methods[name] = method

    def __validate_catalog(self):
        """Warns if any provider has no registered API method."""
        for provider, entry in self.__ai_model_catalog.items():
            if not entry.api_methods:
                logging.warning(f"{provider} has no api method to use.")
Empty file.
40 changes: 40 additions & 0 deletions hanfor/ai_request/api_request_methods/ollama_standard_api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import logging
import threading
from typing import Optional

import requests
from configuration import ai_config
from ai_request import ai_api_methods_abstract_class


class OllamaStandard(ai_api_methods_abstract_class.AiApiMethod):
    """Standard (non-streaming) API method for the "ollama" provider.

    POSTs a JSON prompt request to the configured URL and extracts the
    "response" field from the returned JSON.
    """

    def query_api(
        self,
        query: str,
        url: str,
        api_key: str,
        model_name: str,
        other_params: Optional[dict],
        stop_event: Optional[threading.Event],
    ) -> tuple[str | None, str]:
        """Sends *query* to the Ollama endpoint; returns (response, status).

        ``api_key`` and ``stop_event`` are accepted for interface
        compatibility but are unused here.
        """
        payload = {"model": model_name, "prompt": query, "stream": False}
        # Fix: other_params was previously accepted but silently ignored; merge
        # caller-supplied request fields (e.g. Ollama "options") into the payload.
        if other_params:
            payload.update(other_params)
        try:
            # NOTE(review): no request timeout is set — a hung server blocks this
            # worker indefinitely; consider making a timeout configurable.
            response = requests.post(
                url,
                json=payload,
            )
            response_json = response.json()
            if "response" in response_json:
                return response_json["response"], "ai_response_received"
            else:
                logging.error(f"Key 'response' not found in AI response: {response_json}")
                return None, f"error_ai_response_format_{response_json}"
        except requests.exceptions.RequestException as e:
            logging.error(f"Request failed: {e}")
            return None, f"error_ai_connection_{e}"
        except ValueError as e:
            logging.error(f"Invalid JSON in response: {e}")
            return None, f"error_ai_response_format_{e}"

    @property
    def provider_names_which_work_with_api_method(self) -> list[str]:
        # Must match the provider key used in ai_config.AI_PROVIDERS.
        return ["ollama"]
3 changes: 3 additions & 0 deletions hanfor/config.dist.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,9 @@
# Highlighting in description text
FEATURE_VARIABLE_DESCRIPTION_HIGHLIGHTING = False

# AI
# Master on/off switch for the AI request feature; disabled by default.
FEATURE_AI = False

################################################################################
# Miscellaneous #
################################################################################
Expand Down
17 changes: 17 additions & 0 deletions hanfor/configuration/ai_config.dist.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
################################################################################
# AI API config #
################################################################################

# Catalog of AI providers available to the application. Each key is a provider
# name; each value is unpacked into a ProviderEntry (see ai_core_requests).
AI_PROVIDERS = {
    "PROVIDER_NAME": {
        # Maximum number of simultaneous API requests to this provider.
        # NOTE(review): the placeholder 0 permits no requests once used as a
        # semaphore bound — use a positive value for a real provider.
        "maximum_concurrent_api_requests": 0,
        # Base URL of the provider's API endpoint.
        "url": "PROVIDER_API_URL",
        # Credential for the provider; some API methods may not use it.
        "api_key": "PROVIDER_API_KEY",
        # Must be one of the keys of "models" below.
        "default_model": "MODEL_NAME",
        # model name -> human-readable description.
        "models": {
            "MODEL_NAME": "MODEL_DESCRIPTION",
        },
    },
}

# Provider used when a request names none; must be a key of AI_PROVIDERS.
DEFAULT_PROVIDER = "PROVIDER_NAME"
Loading
Loading