-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.example.yml
More file actions
62 lines (52 loc) · 2.27 KB
/
config.example.yml
File metadata and controls
62 lines (52 loc) · 2.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
# Topic Watch Configuration
# Copy this file to data/config.yml and fill in your values.
# All settings can be overridden via environment variables with the prefix TOPIC_WATCH_
# Example: TOPIC_WATCH_LLM__API_KEY=sk-abc123

# LLM provider settings (required)
llm:
  # Model string in LiteLLM format: provider/model-name
  # Examples: openai/gpt-5.4-nano, anthropic/claude-haiku-4-5, ollama/llama3.3
  model: "openai/gpt-5.4-nano"

  # API key for your LLM provider
  api_key: "your-api-key-here"

  # Optional: Base URL for self-hosted providers like Ollama
  # If running Topic Watch in Docker, use host.docker.internal instead of localhost:
  # base_url: "http://host.docker.internal:11434"
  # If running without Docker:
  # base_url: "http://localhost:11434"

# Notification settings
# Uses Apprise URL format. See: https://github.com/caronc/apprise/wiki
notifications:
  urls:
    - "ntfy://your-topic-name"
    # - "discord://webhook_id/webhook_token"
    # - "tgram://bot_token/chat_id"
    # - "slack://token_a/token_b/token_c/channel"

  # Custom webhook URLs for JSON POST notifications
  # Each URL receives a POST with JSON body:
  # {"topic": "name", "reasoning": "...", "summary": "...", "key_facts": [...], "source_urls": [...], "confidence": 0.85, "timestamp": "..."}
  # NOTE(review): nesting under `notifications` inferred from comment grouping —
  # confirm against the app's config schema (could be a top-level key).
  webhook_urls: []
  # - "https://your-server.com/webhook/topic-watch"
  # - "https://hooks.example.com/trigger"

# Default check interval (how often to check each topic for updates)
# Supported units: m (minutes), h (hours), d (days), w (weeks), M (months)
# Combine units: "1w 3d", "2h 30m" — min 10m, max 6M
check_interval: "6h"

# Maximum number of articles to process per check per topic
# Higher values use more LLM tokens but catch more updates
max_articles_per_check: 10

# Maximum token budget for the knowledge state summary
# The LLM will compress older facts when approaching this limit
knowledge_state_max_tokens: 2000

# Number of days to retain articles before automatic cleanup
# Older articles are deleted daily at 4 AM
article_retention_days: 90

# --- Advanced settings (optional, sensible defaults) ---
# llm_temperature: 0.2
# min_confidence_threshold: 0.6
# web_page_size: 20
# feed_max_retries: 2
# content_fetch_concurrency: 3
# scheduler_misfire_grace_time: 300
# scheduler_jitter_seconds: 30
# llm_max_retries: 2