-
Notifications
You must be signed in to change notification settings - Fork 28
Expand file tree
/
Copy pathagentic_rag.py
More file actions
65 lines (51 loc) · 1.95 KB
/
agentic_rag.py
File metadata and controls
65 lines (51 loc) · 1.95 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# import basics
import os
from dotenv import load_dotenv
from langchain.agents import AgentExecutor
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import create_tool_calling_agent
from langchain_core.prompts import PromptTemplate
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_openai import OpenAIEmbeddings
from langchain import hub
from supabase.client import Client, create_client
from langchain_core.tools import tool
# load environment variables from a local .env file
# (expects SUPABASE_URL, SUPABASE_SERVICE_KEY and an OpenAI API key)
load_dotenv()

# initiate supabase database client; fail fast with a clear message instead of
# letting create_client blow up on a None URL/key later
supabase_url = os.environ.get("SUPABASE_URL")
supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
if not supabase_url or not supabase_key:
    raise RuntimeError(
        "SUPABASE_URL and SUPABASE_SERVICE_KEY must be set (e.g. in a .env file)"
    )
supabase: Client = create_client(supabase_url, supabase_key)

# initiate embeddings model used to embed queries for similarity search
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

# initiate vector store backed by the Supabase "documents" table,
# queried through the "match_documents" RPC function
vector_store = SupabaseVectorStore(
    embedding=embeddings,
    client=supabase,
    table_name="documents",
    query_name="match_documents",
)

# initiate large language model (temperature = 0 for deterministic output)
llm = ChatOpenAI(temperature=0)

# fetch the tool-calling agent prompt from the LangChain prompt hub
prompt = hub.pull("hwchase17/openai-functions-agent")
# create the tools
@tool(response_format="content_and_artifact")
def retrieve(query: str):
    """Retrieve information related to a query."""
    # pull the two most similar documents from the Supabase vector store
    docs = vector_store.similarity_search(query, k=2)
    # render each document as a "Source/Content" section for the LLM;
    # the raw documents travel alongside as the tool artifact
    sections = [
        f"Source: {doc.metadata}\nContent: {doc.page_content}"
        for doc in docs
    ]
    return "\n\n".join(sections), docs
# combine the tools and provide to the llm
tools = [retrieve]
agent = create_tool_calling_agent(llm, tools, prompt)

# create the agent executor (verbose=True logs each tool invocation)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)


def main() -> None:
    """Run one demo question through the agent and print its answer."""
    # invoke the agent with a sample question
    response = agent_executor.invoke(
        {"input": "why is agentic rag better than naive rag?"}
    )
    # put the result on the screen
    print(response["output"])


# guard the demo run so importing this module doesn't trigger an LLM API call
if __name__ == "__main__":
    main()