Pale Fire includes a comprehensive test suite to ensure code quality and reliability.
tests/
├── __init__.py # Test package initialization
├── test_config.py # Configuration module tests
├── test_palefire_core.py # EntityEnricher & QuestionTypeDetector tests
├── test_search_functions.py # Search and helper function tests
└── test_api.py # FastAPI endpoint tests
pip install -r requirements.txt

This installs:
- pytest - Test framework
- pytest-asyncio - Async test support
- pytest-cov - Coverage reporting
- pytest-mock - Mocking utilities
- httpx - FastAPI testing
# Run all tests
pytest
# Run with verbose output
pytest -v
# Run with coverage report
pytest --cov=. --cov-report=html --cov-report=term

# Run config tests only
pytest tests/test_config.py
# Run PaleFireCore tests only
pytest tests/test_palefire_core.py
# Run search function tests only
pytest tests/test_search_functions.py
# Run API tests only
pytest tests/test_api.py

# Run specific test class
pytest tests/test_config.py::TestConfig
# Run specific test method
pytest tests/test_config.py::TestConfig::test_neo4j_config_defaults
# Run tests matching a pattern
pytest -k "test_neo4j"

# Run only unit tests
pytest -m unit
# Run only integration tests
pytest -m integration
# Skip slow tests
pytest -m "not slow"
# Run tests that don't require Neo4j
pytest -m "not requires_neo4j"

# HTML report (opens in browser)
pytest --cov=. --cov-report=html
open htmlcov/index.html
# Terminal report
pytest --cov=. --cov-report=term
# Both
pytest --cov=. --cov-report=html --cov-report=term

Coverage targets:
- Overall: > 80%
- Core modules: > 90%
- Critical functions: 100%
Test individual functions and methods in isolation.
Location: All test files
Marker: @pytest.mark.unit
Run: pytest -m unit
Examples:
- Configuration validation
- Entity extraction
- Question type detection
- Score calculations
Test interactions between components.
Location: test_api.py, test_search_functions.py
Marker: @pytest.mark.integration
Run: pytest -m integration
Examples:
- API endpoints
- Database queries
- Full search pipeline
Some tests require external services:
Neo4j Tests:
- Marker:
@pytest.mark.requires_neo4j - Skip if Neo4j not available
- Run:
pytest -m requires_neo4j
spaCy Tests:
- Marker:
@pytest.mark.requires_spacy - Skip if spaCy not installed
- Run:
pytest -m requires_spacy
import pytest
class TestMyFeature:
"""Test MyFeature functionality."""
def test_basic_functionality(self):
"""Test basic feature works."""
result = my_function(input_data)
assert result == expected_output
def test_edge_case(self):
"""Test edge case handling."""
result = my_function(edge_case_input)
assert result is not None
def test_error_handling(self):
"""Test error handling."""
with pytest.raises(ValueError):
my_function(invalid_input)

Naming conventions:
- Files: test_*.py
- Classes: Test*
- Methods: test_*
- Descriptive names: test_extract_year_from_query
# Equality
assert result == expected
# Approximate equality (for floats)
assert result == pytest.approx(0.75, rel=1e-2)
# Membership
assert 'key' in dictionary
# Type checking
assert isinstance(result, dict)
# Exceptions
with pytest.raises(ValueError):
function_that_raises()
# Exceptions with message match
with pytest.raises(ValueError, match="specific error"):
function_that_raises()

@pytest.fixture
def sample_data():
"""Provide sample data for tests."""
return {
'content': 'Test content',
'type': 'text'
}
def test_with_fixture(sample_data):
"""Test using fixture."""
result = process_data(sample_data)
assert result is not None

from unittest.mock import Mock, patch
def test_with_mock():
"""Test with mocked dependency."""
mock_db = Mock()
mock_db.query.return_value = [{'id': 1}]
result = function_using_db(mock_db)
assert result == [{'id': 1}]
mock_db.query.assert_called_once()
@patch('module.external_api')
def test_with_patch(mock_api):
"""Test with patched external call."""
mock_api.return_value = {'status': 'ok'}
result = function_calling_api()
assert result['status'] == 'ok'

import pytest
@pytest.mark.asyncio
async def test_async_function():
"""Test async function."""
result = await async_function()
assert result is not None

def test_config_validation():
"""Test configuration validation."""
import config
config.validate_config()  # Should not raise

def test_entity_extraction():
"""Test entity extraction."""
enricher = EntityEnricher(use_spacy=False)
text = "Kamala Harris worked in California."
entities = enricher._extract_entities_pattern(text)
assert len(entities) > 0

def test_who_question():
"""Test WHO question detection."""
detector = QuestionTypeDetector()
result = detector.detect_question_type("Who is the president?")
assert result['type'] == 'WHO'
assert result['entity_weights']['PER'] > 1.0

def test_score_calculation():
"""Test score calculation."""
semantic_score = 0.8
connection_score = 0.6
weight = 0.3
final = (1 - weight) * semantic_score + weight * connection_score
assert 0.0 <= final <= 1.0

name: Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.11'
- name: Install dependencies
run: |
pip install -r requirements.txt
- name: Run tests
run: |
pytest --cov=. --cov-report=xml
- name: Upload coverage
uses: codecov/codecov-action@v2

Each test should be independent:
# Good: Independent test
def test_feature():
data = create_test_data()
result = process(data)
assert result == expected
# Bad: Depends on previous test
def test_feature_part2():
# Assumes test_feature_part1 ran first
result = get_global_state()
assert result == expected

# Good: Descriptive name
def test_extract_year_from_query_with_2020():
pass
# Bad: Vague name
def test_extract():
pass

# Good: Single logical assertion
def test_user_name():
user = create_user("John")
assert user.name == "John"
# Acceptable: Related assertions
def test_user_creation():
user = create_user("John", age=30)
assert user.name == "John"
assert user.age == 30

# Good: Fixture for common setup
@pytest.fixture
def enricher():
return EntityEnricher(use_spacy=False)
def test_with_enricher(enricher):
result = enricher.enrich_episode(data)
assert result is not None

def test_empty_input():
result = process([])
assert result == []
def test_none_input():
result = process(None)
assert result is None
def test_large_input():
result = process(range(10000))
assert len(result) == 10000

# Check test discovery
pytest --collect-only
# Verify test file naming
ls tests/test_*.py

# Install in development mode
pip install -e .
# Or add to PYTHONPATH
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

# Install pytest-asyncio
pip install pytest-asyncio
# Mark async tests
@pytest.mark.asyncio
async def test_async():
pass

# Define fixture in conftest.py
# tests/conftest.py
@pytest.fixture
def shared_fixture():
return "shared data"

import time
def test_performance():
"""Test function performance."""
start = time.time()
result = expensive_function()
duration = time.time() - start
assert duration < 1.0 # Should complete in < 1 second
assert result is not None

For API load testing, use tools like:
- locust
- ab (Apache Bench)
- wrk
Testing Guide v1.0 - Quality Through Testing! ✅