Skip to content

Commit 70d2ef1

Browse files
authored
Merge pull request #149 from arc53/bug/promptfixes
better prompts
2 parents 0fd39dd + 02aca04 commit 70d2ef1

File tree

4 files changed

+40
-25
lines changed

4 files changed

+40
-25
lines changed

application/app.py

Lines changed: 22 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,14 @@
77
from flask import Flask, request, render_template
88
from langchain import FAISS
99
from langchain.llms import OpenAIChat
10-
from langchain import VectorDBQA, HuggingFaceHub, Cohere
10+
from langchain import VectorDBQA, HuggingFaceHub, Cohere, OpenAI
1111
from langchain.chains.question_answering import load_qa_chain
12-
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceHubEmbeddings, CohereEmbeddings, HuggingFaceInstructEmbeddings
12+
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceHubEmbeddings, CohereEmbeddings, \
13+
HuggingFaceInstructEmbeddings
1314
from langchain.prompts import PromptTemplate
1415
from error import bad_request
15-
# os.environ["LANGCHAIN_HANDLER"] = "langchain"
16+
17+
os.environ["LANGCHAIN_HANDLER"] = "langchain"
1618

1719
if os.getenv("LLM_NAME") is not None:
1820
llm_choice = os.getenv("LLM_NAME")
@@ -24,8 +26,6 @@
2426
else:
2527
embeddings_choice = "openai_text-embedding-ada-002"
2628

27-
28-
2929
if llm_choice == "manifest":
3030
from manifest import Manifest
3131
from langchain.llms.manifest import ManifestWrapper
@@ -53,6 +53,9 @@
5353
with open("combine_prompt_hist.txt", "r") as f:
5454
template_hist = f.read()
5555

56+
with open("question_prompt.txt", "r") as f:
57+
template_quest = f.read()
58+
5659
if os.getenv("API_KEY") is not None:
5760
api_key_set = True
5861
else:
@@ -76,7 +79,7 @@ def api_answer():
7679
data = request.get_json()
7780
question = data["question"]
7881
history = data["history"]
79-
print('-'*5)
82+
print('-' * 5)
8083
if not api_key_set:
8184
api_key = data["api_key"]
8285
else:
@@ -95,7 +98,7 @@ def api_answer():
9598
vectorstore = ""
9699
else:
97100
vectorstore = ""
98-
101+
#vectorstore = "outputs/inputs/"
99102
# loading the index and the store and the prompt template
100103
# Note if you have used other embeddings than OpenAI, you need to change the embeddings
101104
if embeddings_choice == "openai_text-embedding-ada-002":
@@ -110,13 +113,19 @@ def api_answer():
110113
# create a prompt template
111114
if history:
112115
history = json.loads(history)
113-
template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}", history[1])
114-
c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp, template_format="jinja2")
116+
template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}",
117+
history[1])
118+
c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp,
119+
template_format="jinja2")
115120
else:
116-
c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template, template_format="jinja2")
121+
c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template,
122+
template_format="jinja2")
117123

124+
q_prompt = PromptTemplate(input_variables=["context", "question"], template=template_quest,
125+
template_format="jinja2")
118126
if llm_choice == "openai":
119127
llm = OpenAIChat(openai_api_key=api_key, temperature=0)
128+
#llm = OpenAI(openai_api_key=api_key, temperature=0)
120129
elif llm_choice == "manifest":
121130
llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 2048})
122131
elif llm_choice == "huggingface":
@@ -125,14 +134,12 @@ def api_answer():
125134
llm = Cohere(model="command-xlarge-nightly", cohere_api_key=api_key)
126135

127136
qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
128-
combine_prompt=c_prompt)
137+
combine_prompt=c_prompt, question_prompt=q_prompt)
129138

130-
chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=4)
139+
chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=10)
131140

132-
133141
# fetch the answer
134142
result = chain({"query": question})
135-
print(result)
136143

137144
# some formatting for the frontend
138145
result['answer'] = result['result']
@@ -152,7 +159,7 @@ def api_answer():
152159
# print whole traceback
153160
traceback.print_exc()
154161
print(str(e))
155-
return bad_request(500,str(e))
162+
return bad_request(500, str(e))
156163

157164

158165
@app.route("/api/docs_check", methods=["POST"])

application/combine_prompt.txt

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,4 @@
1-
You are a DocsGPT bot assistant by Arc53 that provides help with programming libraries. You give thorough answers with code examples.
2-
Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
3-
ALWAYS return a "SOURCES" part in your answer.
1+
You are DocsGPT, a friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
42

53
QUESTION: How to merge tables in pandas?
64
=========
@@ -12,12 +10,12 @@ Source: 30-pl
1210
FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
1311
SOURCES: 28-pl 30-pl
1412

15-
QUESTION: How to eat vegetables using pandas?
13+
QUESTION: How are you?
1614
=========
17-
Content: ExtensionArray.repeat(repeats, axis=None) Returns a new ExtensionArray where each element of the current ExtensionArray is repeated consecutively a given number of times. \n\nParameters: repeats int or array of ints. The number of repetitions for each element. This should be a positive integer. Repeating 0 times will return an empty array. axis (0 or ‘index’, 1 or ‘columns’), default 0 The axis along which to repeat values. Currently only axis=0 is supported.
18-
Source: 0-pl
15+
CONTENT:
16+
SOURCE:
1917
=========
20-
FINAL ANSWER: You can't eat vegetables using pandas. You can only eat them using your mouth.
18+
FINAL ANSWER: I am fine, thank you. How are you?
2119
SOURCES:
2220

2321
QUESTION: {{ question }}

application/combine_prompt_hist.txt

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,4 @@
1-
You are a DocsGPT bot assistant by Arc53 that provides help with programming libraries. You give thorough answers with code examples.
2-
Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
3-
ALWAYS return a "SOURCES" part in your answer. You can also remember things from previous questions and use them in your answer.
1+
You are DocsGPT, a friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
42

53
QUESTION: How to merge tables in pandas?
64
=========
@@ -12,6 +10,14 @@ Source: 30-pl
1210
FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
1311
SOURCES: 28-pl 30-pl
1412

13+
QUESTION: How are you?
14+
=========
15+
CONTENT:
16+
SOURCE:
17+
=========
18+
FINAL ANSWER: I am fine, thank you. How are you?
19+
SOURCES:
20+
1521
QUESTION: {{ historyquestion }}
1622
=========
1723
CONTENT:

application/question_prompt.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
Use the following portion of a long document to see if any of the text is relevant to answering the question.
2+
{{ context }}
3+
Question: {{ question }}
4+
Provide all text relevant to the question verbatim. Summarize if needed. If nothing is relevant, return "-".

0 commit comments

Comments
 (0)