Skip to content

Commit d20690e

Browse files
Kevin Tamglorat
authored and committed
feat: summary before doc query for better context
1 parent 976d9ba commit d20690e

File tree

4 files changed

+29
-13
lines changed

4 files changed

+29
-13
lines changed

src/lib/ai/openaiWrapper.ts

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -76,21 +76,26 @@ const completionConfig = {
7676
}
7777

7878
export async function answerMeDirect(arg: {context: string, userPrompt: string, initPrompt?:string}): Promise<string> {
79-
const defaultPrompt = "Answer the question as truthfully as possible using the provided text, and if the answer is not contained within the text below, say \"I don't know\"\n\n"
79+
// const defaultPrompt = "Answer the question as truthfully as possible using the provided text, and if the answer is not contained within the text below, say \"I don't know\"\n\n"
80+
const defaultPrompt = `Use the following pieces of context to answer the users question.
81+
If you don't know the answer, just say that you don't know, don't try to make up an answer.
82+
----------------
83+
`
8084
const initPrompt = arg.initPrompt ?? defaultPrompt
81-
const {context, userPrompt} = arg
82-
const prompt = initPrompt
83-
+ 'Context:\n' + context + '\n\n'
84-
+ 'Q: ' + userPrompt + '\nA: ';
8585

86-
logger.debug(prompt)
86+
const {context, userPrompt} = arg
87+
// const prompt = initPrompt
88+
// + 'Context:\n' + context + '\n\n'
89+
// + 'Q: ' + userPrompt + '\nA: ';
90+
//
91+
// logger.debug(prompt)
8792

8893
const response = await callWithRetry(() => getOpenAIAPI(Config.chatModel).createChatCompletion({
8994
...completionConfig,
9095
model: Config.chatModel,
9196
messages:[
92-
{role: 'system', content: context},
93-
{role: 'user', content: initPrompt},
97+
{role: 'system', content: initPrompt + context},
98+
// {role: 'user', content: context},
9499
{role: 'user', content: userPrompt}
95100
],
96101
// user: TODO: for tracking purposes

src/pages/DocumentQuery.vue

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747

4848
<script setup lang="ts">
4949
import {ref, nextTick, onMounted, Ref, computed} from 'vue'
50-
import {performQna2} from 'src/lib/ai/answer'
50+
import {performQna2, performQna3, performSummarisation} from 'src/lib/ai/answer'
5151
import {createVectorStoreFromLargeContent} from 'src/lib/ai/largeDocQna'
5252
import {exportFile, Notify} from 'quasar'
5353
import {matCloudUpload} from '@quasar/extras/material-icons'
@@ -85,10 +85,13 @@ async function doit() {
8585
8686
const vectorStore = await createVectorStoreFromLargeContent(text.value, (p)=>embedProgress.value=p)
8787
88+
const summary = await performSummarisation(text.value)
89+
console.log(`SUMMARY ${summary}`)
90+
8891
let idx = 0
8992
for (const question of questionStore.questions) {
9093
console.log(`QUESTION ${idx}: ${question}`)
91-
const response = await performQna2(question, vectorStore)
94+
const response = await performQna3(question, summary, vectorStore)
9295
answers.value[idx] = response ?? 'cannot answer'
9396
answerLoading.value[idx] = false
9497
console.log(`ANSWER ${idx}: ${response}`)

src/pages/MultiFilePage.vue

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
import MultiFileManager from 'components/MultiFileManager.vue'
1111
import QuestionInputs from 'components/QuestionInputs.vue'
1212
import {computed, ref} from 'vue'
13-
import {performQna2} from 'src/lib/ai/answer'
13+
import {performQna2, performQna3, performSummarisation} from 'src/lib/ai/answer'
1414
import {exportFile, Notify} from 'quasar'
1515
import {useQuestionStore} from 'stores/questionStore'
1616
import {useMultiFileStore} from 'stores/multiFileStore'
@@ -34,7 +34,7 @@ async function doit() {
3434
for (const [fileIdx, file] of multiFileStore.documentInfo.entries()) {
3535
console.log(`QUESTION ${idx}: ${question}`)
3636
const vectorStore = useMultiFileStore().vectorStore
37-
const response = await performQna2(question, vectorStore, d=>d.metadata['name'] === file.file.name)
37+
const response = await performQna3(question, file.summary, vectorStore, d=>d.metadata['name'] === file.file.name)
3838
answers[idx][fileIdx] = response ?? 'cannot answer'
3939
console.log(`ANSWER ${idx}: ${response}`)
4040
console.log()

src/stores/multiFileStore.ts

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,15 @@ import {getLangchainConfig} from 'src/lib/ai/config';
77
import {embedsCache} from 'src/lib/ai/openaiWrapper';
88
import {anyBufferToText, fileToText} from 'src/lib/ai/unstructured';
99
import {RecursiveCharacterTextSplitter} from 'langchain/text_splitter';
10+
import {performSummarisation} from "src/lib/ai/answer";
1011

1112
export interface DocumentInfo {
1213
name: string
1314
file?: File
1415
buffer?: Buffer
15-
status: 'pending' | 'parsing' | 'processing' | 'ready' | 'error'
16+
status: 'pending' | 'parsing' | 'processing' | 'ready' | 'error' | string
1617
progress?: number,
18+
summary?: string
1719
// vectors?: MemoryVectorStore
1820
}
1921

@@ -58,6 +60,12 @@ export const useMultiFileStore = defineStore('multiFile', {
5860
const vectorStore = this.vectorStore
5961
await vectorStore.addDocuments(docs) // TODO: deduplicate based on metadata?
6062

63+
// We also want a summary
64+
// TODO: This could be in parallel of above?
65+
pendingDocument.status = 'summarising'
66+
const summary = await performSummarisation(text)
67+
pendingDocument.summary = summary
68+
6169
// Important to markRaw to avoid proxying the insides
6270
// pendingDocument.vectors = markRaw(vectorStore)
6371
// Update the status to 'ready' on successful processing

0 commit comments

Comments
 (0)