File tree 1 file changed +4
-2
lines changed
1 file changed +4
-2
lines changed Original file line number Diff line number Diff line change @@ -14,11 +14,13 @@ personas:
14
14
# Default number of chunks to include as context, set to 0 to disable retrieval
15
15
# Remove the field to set to the system default number of chunks/tokens to pass to Gen AI
16
16
# Each chunk is 512 tokens long
17
- num_chunks : 10
17
+ num_chunks : 25
18
18
# Enable/Disable usage of the LLM chunk filter feature whereby each chunk is passed to the LLM to determine
19
19
# if the chunk is useful or not towards the latest user query
20
20
# This feature can be overridden for all personas via DISABLE_LLM_DOC_RELEVANCE env variable
21
- llm_relevance_filter : true
21
+ # Disabling by default since for many deployments, it causes the user to hit rate limits with
22
+ # their LLM provider (e.g. Azure) or causes extremely slow results (Ollama).
23
+ llm_relevance_filter : false
22
24
# Enable/Disable usage of the LLM to extract query time filters including source type and time range filters
23
25
llm_filter_extraction : true
24
26
# Decay documents priority as they age, options are:
You can’t perform that action at this time.
0 commit comments