-
-
Notifications
You must be signed in to change notification settings - Fork 28
Expand file tree
/
Copy pathai_llm_client.py
More file actions
49 lines (40 loc) · 1.4 KB
/
ai_llm_client.py
File metadata and controls
49 lines (40 loc) · 1.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# Copyright 2025 Pierre Verkest
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import logging
from ollama import Client
from odoo import api, models
_logger = logging.getLogger(__name__)
class AiLlmClient(models.AbstractModel):
    """
    Abstract model wrapping a minimal Python client for Ollama.

    Configuration (server URL, default model) is resolved at call time
    from ``ir.config_parameter``; the HTTP calls themselves are delegated
    to the ``ollama`` ``Client``.
    """

    _name = "ai.llm.client"
    _description = "AI LLM Client Wrapper"

    @api.model
    def _config(self, key, default):
        # Read a system parameter with sudo (config is not user-readable
        # by default) and fall back to the provided default when unset.
        return self.env["ir.config_parameter"].sudo().get_param(key, default)

    @api.model
    def _get_client(self):
        """Build an Ollama ``Client`` bound to the configured server URL.

        The URL comes from the ``ai_llm.ollama_url`` system parameter,
        defaulting to ``http://localhost:11434``.
        """
        host = self._config("ai_llm.ollama_url", "http://localhost:11434")
        return Client(host=host)

    @api.model
    def chat(self, messages, model=None, options=None):
        """
        Send a non-streaming chat request to Ollama.

        :param messages: list of dicts [{'role': 'user', 'content': 'hello'}, ...]
        :param model: model name; when falsy, falls back to the
            ``ai_llm.ollama_model`` system parameter (``llama3`` if unset)
        :param options: dict of optional parameters (e.g. temperature)
        :return: the assistant message content (text) of Ollama's response
        """
        chosen_model = model or self._config("ai_llm.ollama_model", "llama3")
        response = self._get_client().chat(
            model=chosen_model, messages=messages, options=options, stream=False
        )
        # ``Client.chat`` returns a ChatResponse; expose only the text.
        return response.message.content