-
Notifications
You must be signed in to change notification settings - Fork 32
Expand file tree
/
Copy path utils.py
More file actions
56 lines (50 loc) · 2.31 KB
/
utils.py
File metadata and controls
56 lines (50 loc) · 2.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
from typing import List
from gpt4all import GPT4All
import openai
import os
# TODO: make model configurable in config
def openai_ask(context: str = None, pages: List[int] = None, question: str = None, openai_api_key: str = None, openai_model: str = "gpt-3.5-turbo"):
    """Answer *question* grounded in *context* using either OpenAI or a local model.

    Routes to the OpenAI chat-completion API when *openai_model* starts with
    "gpt"; otherwise loads a GPT4All model file from ``./llmsmodels``.

    :param context: Retrieved document text the answer should be based on.
    :param pages: Page numbers the context came from (currently unused).
    :param question: The user's question.
    :param openai_api_key: OpenAI API key; only required for "gpt*" models.
    :param openai_model: OpenAI model name, or a local GPT4All model file name.
    :return: The answer text (OpenAI path) or the GPT4All completion object.
    """
    # TODO: make answer to same language
    # Single prompt shared by both backends: instructs the model to answer in
    # the question's language and to admit when it cannot answer.
    messages = [
        {"role": "user",
         "content": f"Answer the following question: {question} based on that context: {context},"
                    " Make sure that the answer of you is in the same language then the question. if you can't just answer: I don't know"}
    ]
    if openai_model.startswith("gpt"):
        openai.api_key = openai_api_key
        completion = openai.ChatCompletion.create(
            model=openai_model,
            messages=messages,
        )
        # TODO: save usage into db
        return completion["choices"][0]["message"]["content"]
    # Local fallback: GPT4All model stored under ./llmsmodels relative to CWD.
    print(f"Using local model: {openai_model}")
    models_dir = os.path.join(os.getcwd(), "llmsmodels")
    gptj = GPT4All(model_name=openai_model, model_path=models_dir)
    return gptj.chat_completion(messages=messages, streaming=False)
def openai_ask_no_aixplora_brain(question: str, openai_api_key: str = None, openai_model: str = "gpt-3.5-turbo"):
    """Send *question* directly to the model with no retrieved context.

    Same routing as ``openai_ask``: OpenAI chat API for "gpt*" model names,
    otherwise a local GPT4All model loaded from ``./llmsmodels``.

    :param question: The raw user question, forwarded verbatim as the prompt.
    :param openai_api_key: OpenAI API key; only required for "gpt*" models.
    :param openai_model: OpenAI model name, or a local GPT4All model file name.
    :return: The answer text (OpenAI path) or the GPT4All completion object.
    """
    # Both backends take the same single-turn message list.
    messages = [{"role": "user", "content": question}]
    if openai_model.startswith("gpt"):
        openai.api_key = openai_api_key
        completion = openai.ChatCompletion.create(
            model=openai_model,
            messages=messages,
        )
        return completion["choices"][0]["message"]["content"]
    # Local fallback: GPT4All model stored under ./llmsmodels relative to CWD.
    print(f"Using local model: {openai_model}")
    models_dir = os.path.join(os.getcwd(), "llmsmodels")
    gptj = GPT4All(model_name=openai_model, model_path=models_dir)
    return gptj.chat_completion(messages=messages, streaming=False)