Commit dfd0563

Add a bound to the inference tips cache
Even small bounds yield roughly equal numbers of cache hits and misses. Further work could determine whether storing only the last result is optimal.
Parent: e6f449c

File tree: 1 file changed, +8 −3 lines

1 file changed

+8
-3
lines changed

astroid/inference_tip.py

Lines changed: 8 additions & 3 deletions
@@ -6,6 +6,7 @@
 
 from __future__ import annotations
 
+from collections import OrderedDict
 from collections.abc import Generator
 from typing import Any, TypeVar
 
@@ -18,9 +19,9 @@
     TransformFn,
 )
 
-_cache: dict[
+_cache: OrderedDict[
     tuple[InferFn[Any], NodeNG, InferenceContext | None], list[InferenceResult]
-] = {}
+] = OrderedDict()
 
 _CURRENTLY_INFERRING: set[tuple[InferFn[Any], NodeNG]] = set()
 
@@ -61,10 +62,14 @@ def inner(
             _CURRENTLY_INFERRING.add(partial_cache_key)
             try:
                 # May raise UseInferenceDefault
-                result = _cache[func, node, context] = list(func(node, context, **kwargs))
+                result = _cache[func, node, context] = list(
+                    func(node, context, **kwargs)
+                )
             finally:
                 # Remove recursion guard.
                 _CURRENTLY_INFERRING.remove(partial_cache_key)
+            if len(_cache) > 64:
+                _cache.popitem(last=False)
 
         yield from result

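For context, here is a minimal standalone sketch of the bounded-cache pattern this commit introduces, with hit/miss counters of the kind one might use to reproduce the measurement behind the commit message. The decorator name bounded_cache, the max_entries parameter, the stats dict, and the toy fib function are all hypothetical illustrations, not astroid API; only the OrderedDict plus popitem(last=False) eviction mirrors the actual change.

from collections import OrderedDict

def bounded_cache(max_entries=64):
    """Sketch of a bounded cache: keep results in insertion order and
    evict the oldest entry once the bound is exceeded (FIFO)."""

    def decorator(func):
        cache = OrderedDict()
        stats = {"hits": 0, "misses": 0}

        def inner(*args):
            if args in cache:
                stats["hits"] += 1
                return cache[args]
            stats["misses"] += 1
            result = cache[args] = func(*args)
            if len(cache) > max_entries:
                # popitem(last=False) removes the entry that was inserted
                # first: FIFO eviction, not LRU (lookups never reorder keys).
                cache.popitem(last=False)
            return result

        inner.stats = stats  # expose counters for measurement
        return inner

    return decorator

@bounded_cache(max_entries=8)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(25)
print(fib.stats)  # compare hit/miss counts across different bounds

Note that the eviction policy is FIFO rather than LRU: lookups never reorder keys, so the oldest insertion is always the first candidate for eviction. An LRU variant would additionally call cache.move_to_end(args) on each hit.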