# llm_memory.py
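"""Keep a conversation history in UnconstrainedMemory.

Minimal BeeAI Framework sketch: seed the memory with a system and a user
message, request a completion from an Ollama-served model, append the
assistant's reply, and print the full history. Assumes a local Ollama
server with the llama3.1 model already pulled (e.g. `ollama pull llama3.1`).
"""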
import asyncio
import sys
import traceback

from beeai_framework.adapters.ollama import OllamaChatModel
from beeai_framework.backend import AssistantMessage, SystemMessage, UserMessage
from beeai_framework.errors import FrameworkError
from beeai_framework.memory import UnconstrainedMemory


async def main() -> None:
    # UnconstrainedMemory keeps the full message history with no size limit.
    memory = UnconstrainedMemory()
    await memory.add_many(
        [
            SystemMessage("Always respond very concisely."),
            UserMessage("Give me the first 5 prime numbers."),
        ]
    )

    # Send the accumulated history to the model, then store its reply
    # back into memory so the conversation stays complete.
    llm = OllamaChatModel("llama3.1")
    response = await llm.create(messages=memory.messages)
    await memory.add(AssistantMessage(response.get_text_content()))

    print("Conversation history")
    for message in memory.messages:
        print(f"{message.role}: {message.text}")


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except FrameworkError as e:
        # FrameworkError.explain() produces a human-readable error summary.
        traceback.print_exc()
        sys.exit(e.explain())
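# Illustrative output (assistant text varies by model; the exact role
# formatting depends on the framework's message role representation):
#   Conversation history
#   system: Always respond very concisely.
#   user: Give me the first 5 prime numbers.
#   assistant: 2, 3, 5, 7, 11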