-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.yaml
More file actions
20 lines (20 loc) · 1.55 KB
/
config.yaml
File metadata and controls
20 lines (20 loc) · 1.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
---
# docod configuration — three sections: project (paths), ai (providers/models),
# docs (sync/rewrite budgets and matching behavior).
project:
  root: "."  # Project root path used by scan/update/sync commands.
ai:
  embedding_provider: "ollama"  # Embedding provider (gemini|openai|ollama).
  embedding_model: "nomic-embed-text"  # Embedding model (local Ollama nomic model).
  embedding_api_key: ""  # Required when embedding_provider is gemini/openai. For openai embeddings, set this here or DOCOD_EMBEDDING_API_KEY.
  embedding_dimension: 768  # Embedding vector dimension.
  llm_provider: "gemini"  # LLM provider for summarization (gemini|openai).
  llm_model: "gemini-2.5-flash-lite"  # LLM model for section drafting/summarization.
  llm_api_key: ""  # Required when llm_provider is gemini/openai. You can also set DOCOD_LLM_API_KEY.
  openai_base_url: ""  # Optional override for OpenAI embeddings endpoint (/v1/embeddings).
  llm_base_url: ""  # Optional override for LLM endpoint. For openai, use API root or /v1/chat/completions.
  ollama_base_url: "http://127.0.0.1:11434"  # Local Ollama server URL for embeddings.
docs:
  max_llm_sections: 2  # Max number of impacted sections to rewrite with LLM per sync run.
  enable_semantic_match: false  # Enable embedding-based section matching for unmatched changes.
  enable_llm_router: true  # Enable ToC-based LLM routing to choose best section for unmatched changes.
  max_llm_routes: 2  # Max unmatched chunks allowed to call ToC LLM routing per sync run.
  min_confidence_for_llm: 0.6  # Rewrite only sections whose planner confidence meets this threshold (0.0~1.0).
  max_embed_chunks_per_run: 80  # Upper bound for incremental embedding chunks per run (0 means unlimited).