Skip to content

Commit d6b9909

Browse files
authored
Merge pull request #47 from jethronap/39_testing
39 testing
2 parents 242efab + 80b3d78 commit d6b9909

File tree

9 files changed

+397
-63
lines changed

9 files changed

+397
-63
lines changed

local_test_pipeline.sh

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ pytest tests/agents/test_scraper_agent.py
2020
echo "Done..."
2121
echo "==============================================="
2222

23+
echo "Running analyzer agent tests"
24+
pytest tests/agents/test_analyzer_agent.py
25+
echo "Done..."
26+
echo "==============================================="
27+
2328
echo "Running llm wrapper tests"
2429
pytest tests/tools/test_llm_wrapper.py
2530
echo "Done..."
@@ -30,13 +35,19 @@ pytest tests/tools/test_ollama_client.py
3035
echo "Done..."
3136
echo "==============================================="
3237

38+
39+
echo "Running io tools tests"
40+
pytest tests/tools/test_io_tools.py
41+
echo "Done..."
42+
echo "==============================================="
43+
3344
echo "Running memory store tests"
3445
pytest tests/memory/test_memory_store.py
3546
echo "Done..."
3647
echo "==============================================="
3748

38-
echo "Running with_retry decorator tests"
39-
pytest tests/utils/test_retry.py
49+
echo "Running decorator tests"
50+
pytest tests/utils/test_decorators.py
4051
echo "Done..."
4152
echo "==============================================="
4253

main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from src.agents.analyser_agent import AnalyzerAgent
1+
from src.agents.analyzer_agent import AnalyzerAgent
22

33
from src.config.settings import DatabaseSettings
44
from src.memory.store import MemoryStore
File renamed without changes.

src/config/settings.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,10 @@ class OllamaSettings(BaseSettings):
1414
default="http://localhost:11434/api/generate", description="The Ollama base url"
1515
)
1616
timeout_seconds: float = Field(
17-
default=None, description="Timeout for calling Ollama"
17+
default=30.0, description="Timeout for calling Ollama"
1818
)
1919
stream: bool = Field(default=False, description="Flag to denote chunked streaming.")
20-
model: str = Field(default=None, description="Ollama model name.")
20+
model: str = Field(default="qwen2.5", description="Ollama model name.")
2121

2222
model_config = SettingsConfigDict(
2323
env_file=".env", env_prefix="OLLAMA_", extra="allow"
@@ -42,17 +42,17 @@ class LoggingSettings(BaseSettings):
4242
Configuration for Loguru logging sinks.
4343
"""
4444

45-
level: str = Field(default=None, description="The log level")
45+
level: str = Field(default="DEBUG", description="The log level")
4646
console: bool = Field(default=True, description="Show logs in console")
4747
enable_file: bool = Field(
4848
default=False, description="Flag to denote persistence of logs"
4949
)
5050
filepath: Optional[Path] = Field(
51-
default=None, description="Optional file path for logs"
51+
default="logs/agents.log", description="Optional file path for logs"
5252
)
53-
rotation: str = Field(default=None, description="Roll log after this size")
53+
rotation: str = Field(default="10 MB", description="Roll log after this size")
5454
retention: str = Field(
55-
default=None, description="Keep logs for this amount of time"
55+
default="7 days", description="Keep logs for this amount of time"
5656
)
5757
compression: str = Field(default="zip", description="Compress old logs")
5858

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
import json
2+
from pathlib import Path
3+
import pytest
4+
5+
from src.agents.analyzer_agent import AnalyzerAgent
6+
from src.utils.db import payload_hash
7+
from tests.conftest import make_raw_dump, set_stub_response
8+
9+
10+
def test_first_run_creates_files(tmp_path, mem_fake):
    """First analyzer run: the LLM is consulted, enriched + GeoJSON files are
    written, stats are returned, and a cache row is stored in memory."""
    raw_path, raw = make_raw_dump(tmp_path)

    # Canned LLM verdict handed back for every element in the dump.
    set_stub_response(
        {
            "public": False,
            "sensitive": False,
            "camera_type": None,
            "mount_type": None,
        }
    )

    agent = AnalyzerAgent("AnalyzerAgent", memory=mem_fake)
    ctx = agent.achieve_goal({"path": str(raw_path)})

    # -------- assertions ----------
    enriched_path = Path(ctx["output_path"])
    geojson_path = Path(ctx["geojson_path"])

    # Both artefacts must exist on disk.
    assert enriched_path.exists()
    assert geojson_path.exists()

    # The enriched dump carries the analysis block produced by the stub.
    first_analysis = json.loads(enriched_path.read_text())["elements"][0]["analysis"]
    assert first_analysis["public"] is False

    # Exactly one GeoJSON feature for the single input element.
    assert len(json.loads(geojson_path.read_text())["features"]) == 1

    # Stats are present and consistent with the one-element input.
    stats = ctx["stats"]
    assert stats.get("total") == 1
    assert stats.get("sensitive_count") == 0

    # Memory must have received a cache entry for the enriched output.
    assert any(row.step == "enriched_cache" for row in mem_fake.rows)
def test_second_run_hits_cache(tmp_path, mem_fake, monkeypatch):
    """Second analyzer run: with cache row and output files pre-seeded, the
    LLM must never be invoked."""
    raw_path, raw = make_raw_dump(tmp_path)

    # Fabricate the artefacts a previous run would have left behind.
    cached_enriched = raw_path.with_name("lund_enriched.json")
    cached_geojson = cached_enriched.with_suffix(".geojson")
    cached_enriched.write_text(json.dumps({"elements": []}), encoding="utf-8")
    cached_geojson.write_text(
        json.dumps({"type": "FeatureCollection", "features": []}), "utf-8"
    )

    # Seed the cache row mapping the raw payload hash to those files.
    raw_hash = payload_hash(raw)
    mem_fake.store(
        "AnalyzerAgent",
        "enriched_cache",
        f"{raw_hash}|{cached_enriched}|{cached_geojson}",
    )

    # Any LLM invocation is an immediate test failure.
    from src.tools.llm_wrapper import LocalLLM

    monkeypatch.setattr(
        LocalLLM,
        "generate_response",
        lambda *_a, **_kw: pytest.fail("LLM should not be called"),
    )

    agent = AnalyzerAgent("AnalyzerAgent", memory=mem_fake)
    ctx = agent.achieve_goal({"path": str(raw_path)})

    # The agent must hand back the pre-existing file paths untouched.
    assert ctx["output_path"] == str(cached_enriched)
    assert ctx["geojson_path"] == str(cached_geojson)

tests/conftest.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@
44
import pytest
55
import requests
66

7+
from src.agents.analyzer_agent import AnalyzerAgent
78
from src.config.settings import OllamaSettings, DatabaseSettings, OverpassSettings
9+
from src.utils.decorators import log_action
810

911

1012
@pytest.fixture
@@ -119,3 +121,94 @@ def load(self, agent_id: str):
119121
@pytest.fixture
120122
def mem_fake():
121123
return MemoryStoreFake()
124+
125+
126+
class DummyLogger:
    """Recording stand-in for the loguru logger.

    Each level method appends its message to a per-level list so tests can
    assert on what was logged without any real sink.
    """

    def __init__(self):
        # One bucket per log level exercised by the code under test.
        self.infos, self.debugs, self.exceptions = [], [], []

    def info(self, msg):
        self.infos.append(msg)

    def debug(self, msg):
        self.debugs.append(msg)

    def exception(self, msg):
        self.exceptions.append(msg)
141+
142+
@pytest.fixture(autouse=True)
def swap_logger(monkeypatch):
    """Swap the real logger for a recording stub in every test.

    Patches both the canonical logger and the module-local alias imported
    by the decorators module, then yields the stub for assertions.
    """
    stub = DummyLogger()
    for target in ("src.config.logger.logger", "src.utils.decorators.logger"):
        monkeypatch.setattr(target, stub)
    return stub
149+
150+
151+
class DummyAgent:
    """Target class whose decorated methods exercise log_action in tests."""

    name = "MyAgent"

    @log_action
    def simple(self, x, context=None):
        """just return x * 2"""
        return 2 * x

    @log_action
    def make_list(self, n):
        # Returns [0, 1, ..., n-1].
        return list(range(n))

    @log_action
    def save_file(self, path: str):
        # Mimics a save that reports the written file's name.
        return f"{path}.json"

    @log_action
    def blows_up(self):
        # Lets tests verify exception logging/propagation.
        raise ValueError("oops")
170+
171+
172+
@pytest.fixture(autouse=True)
def patch_prompt_template(monkeypatch):
    """
    Give AnalyzerAgent an in-memory prompt template so _load_template()
    never has to touch the filesystem during tests.
    """
    dummy_template = "DUMMY TEMPLATE -- tags: {{ tags }}"
    monkeypatch.setattr(
        AnalyzerAgent, "_load_template", lambda self: dummy_template, raising=True
    )
183+
184+
185+
# -------------------- Helpers for testing Analyzer agent --------------------#
186+
def make_raw_dump(tmp_path):
    """Write a tiny one-element Overpass dump into *tmp_path*.

    Args:
        tmp_path: Directory (a ``Path``) in which to create ``lund.json``.

    Returns:
        tuple[Path, dict]: the path of the written dump and the raw payload
        it contains.  (The old docstring claimed only a Path was returned.)
    """
    raw = {
        "elements": [
            {
                "type": "node",
                "id": 1,
                "lat": 55.0,
                "lon": 13.0,
                "tags": {"man_made": "surveillance"},
            }
        ]
    }
    dump_path = tmp_path / "lund.json"
    dump_path.write_text(json.dumps(raw), encoding="utf-8")
    return dump_path, raw
202+
203+
204+
def set_stub_response(data):
    """Configure StubClient (patched into the Ollama client by conftest) to
    return *data* as its canned reply.

    LocalLLM accepts either ``{"response": "<json>"}`` or ``{"choices": [...]}``;
    the stub uses the first form.  (Removed a commented-out import that was
    dead code — StubClient is defined in this module.)
    """
    StubClient._response = {
        "response": json.dumps(data, separators=(",", ":")),
    }

tests/tools/test_io_tools.py

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
import json
2+
from pathlib import Path
3+
4+
from src.tools.io_tools import (
5+
load_overpass_elements,
6+
save_enriched_elements,
7+
to_geojson,
8+
)
9+
10+
# Shared fixture data for the io_tools tests.
# Element id=2 carries no lat/lon, so to_geojson must skip it; id=3 proves
# that 0.0 coordinates are treated as valid, not falsy.
ELEMENTS = [
    {"id": 1, "lat": 10.0, "lon": 20.0, "tags": {"foo": "bar"}, "analysis": {"a": 1}},
    {"id": 2},
    {"id": 3, "lat": 0.0, "lon": 0.0, "tags": {}, "analysis": {}},
]
15+
16+
17+
def test_load_overpass_elements(tmp_path):
    """load_overpass_elements returns the dump's "elements" list verbatim."""
    elements = [{"id": "x"}, {"id": "y"}]
    dump_path = tmp_path / "dump.json"
    dump_path.write_text(json.dumps({"elements": elements}), encoding="utf-8")

    loaded = load_overpass_elements(dump_path)

    assert isinstance(loaded, list)
    assert loaded == elements
27+
28+
def test_save_enriched_elements(tmp_path):
    """save_enriched_elements wraps the list in {"elements": ...} and writes
    it to a path derived from the source file's name."""
    # The source file only has to exist so the output name can be built.
    source = tmp_path / "city.json"
    source.write_text("{}", encoding="utf-8")

    enriched = [{"id": 42}, {"id": 43}]
    out_path = Path(save_enriched_elements(enriched, source))

    assert out_path.exists()
    assert json.loads(out_path.read_text(encoding="utf-8")) == {"elements": enriched}
42+
43+
44+
def test_to_geojson_without_writing(tmp_path):
    """to_geojson builds a FeatureCollection and skips elements missing
    coordinates, merging tags and analysis into feature properties."""
    enriched_path = tmp_path / "in.json"
    enriched_path.write_text(json.dumps({"elements": ELEMENTS}), encoding="utf-8")

    geo = to_geojson(enriched_path)

    assert geo["type"] == "FeatureCollection"
    # Element id=2 has no lat/lon, so only two of three elements survive.
    assert len(geo["features"]) == 2

    first = geo["features"][0]
    # GeoJSON coordinate order is [lon, lat].
    assert first["geometry"] == {"type": "Point", "coordinates": [20.0, 10.0]}
    # Properties must contain both the OSM tags and the analysis fields.
    assert first["properties"]["foo"] == "bar"
    assert first["properties"]["a"] == 1
62+
63+
64+
def test_to_geojson_with_writing(tmp_path):
    """With an output path, to_geojson also persists the collection to disk,
    and the written file matches the returned dict exactly."""
    enriched_path = tmp_path / "data.json"
    enriched_path.write_text(json.dumps({"elements": ELEMENTS}), encoding="utf-8")
    out_geojson = tmp_path / "out.geojson"

    geo = to_geojson(enriched_path, out_geojson)

    assert out_geojson.exists()
    written = json.loads(out_geojson.read_text(encoding="utf-8"))
    assert written == geo
    assert written["type"] == "FeatureCollection"

0 commit comments

Comments
 (0)