import logging

logger = logging.getLogger(__name__)

from hamilton import contrib

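# contrib.catch_import_errors logs a helpful message if these optional
# dependencies are missing, rather than failing with a bare ImportError.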
with contrib.catch_import_errors(__name__, __file__, logger):
    import openai

    # use langchain implementation of vector store
    from langchain_community.vectorstores import FAISS
    from langchain_core.vectorstores import VectorStoreRetriever

    # use langchain embedding wrapper with vector store
    from langchain_openai import OpenAIEmbeddings


def vector_store(input_texts: list[str]) -> VectorStoreRetriever:
    """A vector store. This function creates and populates one for querying.

    This is a cute function encapsulating the creation of a vector store. In real
    life you could replace this with a more complex function, or with one that
    returns a client to an existing vector store.

    :param input_texts: the input texts, i.e. documents, to be stored.
    :return: a vector store retriever that can be queried against.
    """
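    # embed each input text with OpenAIEmbeddings and build an in-memory FAISS index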
    vectorstore = FAISS.from_texts(input_texts, embedding=OpenAIEmbeddings())
    retriever = vectorstore.as_retriever()
    return retriever

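# A hedged sketch (not part of the dataflow): in a real deployment you might
# persist the index instead of rebuilding it on every run. `save_local` and
# `load_local` come from the langchain_community FAISS wrapper; the folder
# path below is illustrative only.
#
#   vectorstore.save_local("faiss_index")
#   vectorstore = FAISS.load_local(
#       "faiss_index", OpenAIEmbeddings(), allow_dangerous_deserialization=True
#   )
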
def context(question: str, vector_store: VectorStoreRetriever, top_k: int = 5) -> str:
    """This function returns the string context to put into a prompt for the RAG model.

    :param question: the user question to search the vector store with.
    :param vector_store: the vector store to search against.
    :param top_k: the number of results to return.
    :return: a single string containing all of the retrieved context.
    """
    # VectorStoreRetriever reads "k" from its search_kwargs rather than from
    # keyword arguments passed to invoke(), so set it on the retriever first.
    vector_store.search_kwargs["k"] = top_k
    _results = vector_store.invoke(question)
    # concatenate the retrieved documents into one newline-separated string
    return "\n".join(doc.page_content for doc in _results)


def rag_prompt(context: str, question: str) -> str:
    """Creates a prompt that includes the question and context for the LLM to make sense of.

    :param context: the information context to use.
    :param question: the user question the LLM should answer.
    :return: the full prompt.
    """
    template = (
        "Answer the question based only on the following context:\n"
        "{context}\n\n"
        "Question: {question}"
    )

    return template.format(context=context, question=question)


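# For example, rag_prompt("stefan worked at Stitch Fix", "where did stefan work?")
# returns:
#
#   Answer the question based only on the following context:
#   stefan worked at Stitch Fix
#
#   Question: where did stefan work?
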
def llm_client() -> openai.OpenAI:
    """The LLM client to use for the RAG model."""
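    # the client reads OPENAI_API_KEY (and related settings) from the environment by default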
    return openai.OpenAI()


def rag_response(rag_prompt: str, llm_client: openai.OpenAI) -> str:
    """Creates the RAG response from the LLM model for the given prompt.

    :param rag_prompt: the prompt to send to the LLM.
    :param llm_client: the LLM client to use.
    :return: the response from the LLM.
    """
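    # single-turn chat completion: the whole RAG prompt goes in as one user message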
    response = llm_client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": rag_prompt}],
    )
    return response.choices[0].message.content


if __name__ == "__main__":
    # contrib dataflows live in __init__.py, so the module imports itself this
    # way to hand itself to the Hamilton driver below.
    import __init__ as hamilton_faiss_rag

    from hamilton import driver, lifecycle

    dr = (
        driver.Builder()
        .with_modules(hamilton_faiss_rag)
        .with_config({})
        # this prints the inputs and outputs of each step.
        .with_adapters(lifecycle.PrintLn(verbosity=2))
        .build()
    )
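    # writes a visualization of the dataflow to dag.png (requires graphviz to be installed)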
    dr.display_all_functions("dag.png")
    print(
        dr.execute(
            ["rag_response"],
            inputs={
                "input_texts": [
                    "harrison worked at kensho",
                    "stefan worked at Stitch Fix",
                ],
                "question": "where did stefan work?",
            },
        )
    )
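    # with a valid OPENAI_API_KEY set, the model should answer from the supplied
    # context, e.g. something along the lines of "Stefan worked at Stitch Fix."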