Skip to content

Commit 484a4eb

Browse files
committed
test: Point benchmark + coverage at shared large-env fixture
- Bump the engine-test-data submodule to pick up Flagsmith/engine-test-data#51 (a realistic 262-feature / 26-multivariate / segment-override test case mirroring the #198 scenario) and consume it as the large-context benchmark, dropping the synthetic local fixture.
- Exercise the `metadata` branch of the `get_flag_result_from_context` wrapper by adding metadata to the existing parametrized test.
- Add a test for the `value == 100` recursive fallback on the new two-key fast-path hasher.

Restores 100% coverage and keeps the realistic benchmark portable across every SDK consuming engine-test-data, rather than living only in this repo's Python tests.

beep boop
1 parent 7b59ab3 commit 484a4eb

4 files changed

Lines changed: 47 additions & 56 deletions

File tree

tests/engine_tests/test_engine.py

Lines changed: 10 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,14 @@
66
import pytest
77
from _pytest.mark import ParameterSet
88

9-
from flag_engine.context.types import EvaluationContext, FeatureContext
9+
from flag_engine.context.types import EvaluationContext
1010
from flag_engine.engine import get_evaluation_result
1111
from flag_engine.result.types import EvaluationResult
1212

1313
TEST_CASES_PATH = Path(__file__).parent / "engine-test-data/test_cases"
14+
LARGE_ENVIRONMENT_TEST_CASE = (
15+
"test_000000cf-0000-0000-0000-000000000000__large_environment.json"
16+
)
1417

1518
EnvironmentDocument = dict[str, typing.Any]
1619

@@ -40,66 +43,19 @@ def _extract_benchmark_contexts(
4043
yield pyjson5.loads((test_cases_dir_path / file_path).read_text())["context"]
4144

4245

43-
def _build_large_benchmark_context(
44-
n_features: int = 262,
45-
multivariate_features: int = 26,
46-
) -> EvaluationContext:
47-
"""Mirror the scenario from flagsmith-python-client issue #198: a real-world
48-
local-evaluation environment with ~260 features, a handful of which use
49-
multivariate splits, evaluated for a single identity. Small enough to
50-
keep the benchmark fast but large enough to surface per-feature overhead.
51-
"""
52-
features: dict[str, FeatureContext[typing.Any]] = {}
53-
for i in range(n_features):
54-
name = f"feature_{i:04d}"
55-
fc: FeatureContext[typing.Any] = {
56-
"key": str(i + 1),
57-
"name": name,
58-
"enabled": bool(i % 2),
59-
"value": f"value-{i}",
60-
"metadata": {"id": i + 1},
61-
}
62-
if i < multivariate_features:
63-
# Intentionally reverse-ordered so ``sorted()`` has work to do.
64-
fc["variants"] = [
65-
{"value": f"mv-{i}-b", "weight": 40.0, "priority": 2},
66-
{"value": f"mv-{i}-a", "weight": 60.0, "priority": 1},
67-
]
68-
features[name] = fc
69-
return {
70-
"environment": {"key": "bench-env", "name": "bench"},
71-
"features": features,
72-
"segments": {
73-
"1": {
74-
"key": "1",
75-
"name": "bench-segment",
76-
"rules": [
77-
{
78-
"type": "ALL",
79-
"conditions": [
80-
{
81-
"property": "venue_id",
82-
"operator": "EQUAL",
83-
"value": "no-match",
84-
}
85-
],
86-
}
87-
],
88-
}
89-
},
90-
"identity": {
91-
"identifier": "anonymous",
92-
"traits": {"venue_id": "12345"},
93-
},
94-
}
46+
def _load_test_case_context(name: str) -> EvaluationContext:
47+
ctx: EvaluationContext = pyjson5.loads((TEST_CASES_PATH / name).read_text())[
48+
"context"
49+
]
50+
return ctx
9551

9652

9753
TEST_CASES = sorted(
9854
_extract_test_cases(TEST_CASES_PATH),
9955
key=lambda param: str(param.id),
10056
)
10157
BENCHMARK_CONTEXTS = list(_extract_benchmark_contexts(TEST_CASES_PATH))
102-
LARGE_BENCHMARK_CONTEXT = _build_large_benchmark_context()
58+
LARGE_BENCHMARK_CONTEXT = _load_test_case_context(LARGE_ENVIRONMENT_TEST_CASE)
10359

10460

10561
@pytest.mark.parametrize(

tests/unit/segments/test_segments_evaluator.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -806,6 +806,7 @@ def test_segment_condition_matches_context_value_for_modulo(
806806
"name": "my_feature",
807807
"reason": "SPLIT; weight=30",
808808
"value": "foo",
809+
"metadata": {"id": 7},
809810
},
810811
),
811812
(
@@ -815,6 +816,7 @@ def test_segment_condition_matches_context_value_for_modulo(
815816
"name": "my_feature",
816817
"reason": "SPLIT; weight=30",
817818
"value": "bar",
819+
"metadata": {"id": 7},
818820
},
819821
),
820822
(
@@ -824,6 +826,7 @@ def test_segment_condition_matches_context_value_for_modulo(
824826
"name": "my_feature",
825827
"reason": "DEFAULT",
826828
"value": "control",
829+
"metadata": {"id": 7},
827830
},
828831
),
829832
),
@@ -851,6 +854,7 @@ def test_get_flag_result_from_context__calls_returns_expected(
851854
"enabled": False,
852855
"name": "my_feature",
853856
"value": "control",
857+
"metadata": {"id": 7},
854858
"variants": [
855859
{"value": "foo", "weight": 30, "priority": 1},
856860
{"value": "bar", "weight": 30, "priority": 2},

tests/unit/utils/test_utils_hashing.py

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,10 @@
55

66
import pytest
77

8-
from flag_engine.utils.hashing import get_hashed_percentage_for_object_ids
8+
from flag_engine.utils.hashing import (
9+
get_hashed_percentage_for_object_id_pair,
10+
get_hashed_percentage_for_object_ids,
11+
)
912

1013

1114
@pytest.mark.parametrize(
@@ -146,3 +149,31 @@ def hexdigest_side_effect() -> str:
146149
# the second call, with a string (in bytes) that contains each object id twice
147150
expected_bytes_2 = ",".join(str(id_) for id_ in object_ids * 2).encode("utf-8")
148151
assert call_list[1][0][0] == expected_bytes_2
152+
153+
154+
@mock.patch("flag_engine.utils.hashing.hashlib")
155+
def test_get_hashed_percentage_for_object_id_pair__value_is_100__falls_back(
156+
mock_hashlib: mock.Mock,
157+
) -> None:
158+
"""When the two-key fast path would return exactly 100, it must fall back
159+
to the generic helper with iterations=2 (same anti-boundary guarantee as
160+
``get_hashed_percentage_for_object_ids``)."""
161+
162+
# 270e converts to 9998, forcing value == 100. 270f → 9999 → value == 0.
163+
hashed_values = ["270f", "270e"]
164+
165+
def hexdigest_side_effect() -> str:
166+
return hashed_values.pop()
167+
168+
mock_hash = mock.MagicMock()
169+
mock_hashlib.md5.return_value = mock_hash
170+
mock_hash.hexdigest.side_effect = hexdigest_side_effect
171+
172+
value = get_hashed_percentage_for_object_id_pair("12", "93")
173+
174+
assert value == 0
175+
# First call: fast-path two-key hash (single pair); second: recursive fallback.
176+
call_list = mock_hashlib.md5.call_args_list
177+
assert len(call_list) == 2
178+
assert call_list[0][0][0] == b"12,93"
179+
assert call_list[1][0][0] == b"12,93,12,93"

0 commit comments

Comments (0)