Commit 53c7437

refactored in the performance report capabilities that were developed in the Phase_E set of projects
1 parent 5ebc4da commit 53c7437

37 files changed: 2679 additions & 1 deletion

osbot_utils/helpers/performance/benchmark/schemas/enums/Enum__Measure_Mode.py

Lines changed: 2 additions & 1 deletion
@@ -8,4 +8,5 @@
 class Enum__Measure_Mode(Enum):
     QUICK   = 'quick'     # ~100 iterations, fastest, highest variance
     FAST    = 'fast'      # ~1,000 iterations, balanced
-    DEFAULT = 'default'   # ~10,000 iterations, most accurate, slowest
+    DEFAULT = 'default'   # ~10,000 iterations, most accurate, slowest
+    ONLY_3  = 'only_3'    # todo: add only_3 support to execution engine
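The comments double as the iteration spec: QUICK ≈ 100 iterations, FAST ≈ 1,000, DEFAULT ≈ 10,000, and ONLY_3 still needs engine support. A minimal sketch of how an execution engine might map the enum onto iteration counts; the ITERATIONS_BY_MODE table and iterations_for helper are illustrative assumptions, and the value 3 for ONLY_3 is inferred from its name, not from this commit:

    from osbot_utils.helpers.performance.benchmark.schemas.enums.Enum__Measure_Mode import Enum__Measure_Mode

    ITERATIONS_BY_MODE = { Enum__Measure_Mode.QUICK  : 100    ,    # ~100 iterations, fastest, highest variance
                           Enum__Measure_Mode.FAST   : 1_000  ,    # ~1,000 iterations, balanced
                           Enum__Measure_Mode.DEFAULT: 10_000 ,    # ~10,000 iterations, most accurate, slowest
                           Enum__Measure_Mode.ONLY_3 : 3      }    # assumed: 3 iterations, per the enum name

    def iterations_for(mode: Enum__Measure_Mode) -> int:           # hypothetical helper, not in this commit
        return ITERATIONS_BY_MODE[mode]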
Lines changed: 239 additions & 0 deletions
@@ -0,0 +1,239 @@
# ═══════════════════════════════════════════════════════════════════════════════
# Perf_Report__Builder - Main entry point for performance report generation
# Runs benchmarks, calculates categories/percentages, identifies bottlenecks
# ═══════════════════════════════════════════════════════════════════════════════

from typing import Callable, Optional
from osbot_utils.helpers.performance.benchmark.Perf_Benchmark__Timing import Perf_Benchmark__Timing
from osbot_utils.helpers.performance.benchmark.schemas.safe_str.Safe_Str__Benchmark__Section import Safe_Str__Benchmark__Section
from osbot_utils.helpers.performance.benchmark.schemas.timing.Schema__Perf_Benchmark__Timing__Config import Schema__Perf_Benchmark__Timing__Config
from osbot_utils.helpers.performance.report.schemas.collections.Dict__Perf_Report__Legend import Dict__Perf_Report__Legend
from osbot_utils.helpers.performance.report.schemas.collections.List__Perf_Report__Benchmarks import List__Perf_Report__Benchmarks
from osbot_utils.helpers.performance.report.schemas.collections.List__Perf_Report__Categories import List__Perf_Report__Categories
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report import Schema__Perf_Report
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report__Analysis import Schema__Perf_Report__Analysis
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report__Benchmark import Schema__Perf_Report__Benchmark
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report__Builder__Config import Schema__Perf_Report__Builder__Config
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report__Category import Schema__Perf_Report__Category
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report__Metadata import Schema__Perf_Report__Metadata
from osbot_utils.type_safe.primitives.core.Safe_Int import Safe_Int
from osbot_utils.type_safe.type_safe_core.decorators.type_safe import type_safe
from osbot_utils.type_safe.Type_Safe import Type_Safe


class Perf_Report__Builder(Type_Safe):                                           # Main builder for performance reports
    metadata       : Schema__Perf_Report__Metadata                               # Report metadata
    legend         : Dict__Perf_Report__Legend                                   # Category descriptions
    config         : Schema__Perf_Benchmark__Timing__Config                      # Timing configuration
    builder_config : Schema__Perf_Report__Builder__Config                        # Builder configuration

    # ═══════════════════════════════════════════════════════════════════════════
    # Main Entry Point
    # ═══════════════════════════════════════════════════════════════════════════

    @type_safe
    def run(self, benchmarks_fn: Callable) -> Schema__Perf_Report:               # Run benchmarks and build report
        timing = Perf_Benchmark__Timing(config=self.config)
        benchmarks_fn(timing)

        benchmarks = self.build_benchmarks(timing)
        categories = self.build_categories(benchmarks)
        analysis   = self.build_analysis(benchmarks, categories)

        self.metadata.benchmark_count = len(benchmarks)

        return Schema__Perf_Report(metadata   = self.metadata ,
                                   benchmarks = benchmarks    ,
                                   categories = categories    ,
                                   analysis   = analysis      ,
                                   legend     = self.legend   )
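    # Usage note (assumption, not part of this commit): benchmarks_fn receives the
    # Perf_Benchmark__Timing instance and is expected to populate timing.results with
    # entries exposing .final_score (in ns); the registration API it would call on
    # `timing` is defined by Perf_Benchmark__Timing and is not shown in this diff.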
    # ═══════════════════════════════════════════════════════════════════════════
    # Build Benchmarks
    # ═══════════════════════════════════════════════════════════════════════════

    @type_safe
    def build_benchmarks(self,
                         timing: Perf_Benchmark__Timing) -> List__Perf_Report__Benchmarks:
        benchmarks = List__Perf_Report__Benchmarks()
        total_ns   = self.calculate_total_ns(timing)

        for bench_id, result in timing.results.items():
            time_ns     = result.final_score
            category_id = self.extract_category_id(bench_id)
            pct         = (time_ns / total_ns * 100) if total_ns > 0 else 0

            benchmark = Schema__Perf_Report__Benchmark(benchmark_id = bench_id    ,
                                                       time_ns      = time_ns     ,
                                                       category_id  = category_id ,
                                                       pct_of_total = pct         )
            benchmarks.append(benchmark)

        return benchmarks

    @type_safe
    def calculate_total_ns(self, timing: Perf_Benchmark__Timing) -> Safe_Int:
        total = 0
        for _, result in timing.results.items():
            total += result.final_score
        return total

    @type_safe
    def extract_category_id(self,
                            benchmark_id: str) -> Safe_Str__Benchmark__Section:
        if '_' in benchmark_id:
            return Safe_Str__Benchmark__Section(benchmark_id.split('_')[0])
        return Safe_Str__Benchmark__Section(benchmark_id[0] if benchmark_id else 'X')
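    # Worked examples (traced from extract_category_id above):
    #   'A1_create_str' -> 'A1'     (prefix before the first '_')
    #   'B2'            -> 'B'      (no '_': first character only)
    #   ''              -> 'X'      (empty id falls back to 'X')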
    # ═══════════════════════════════════════════════════════════════════════════
    # Build Categories
    # ═══════════════════════════════════════════════════════════════════════════

    @type_safe
    def build_categories(self,
                         benchmarks: List__Perf_Report__Benchmarks) -> List__Perf_Report__Categories:
        cat_totals = {}                                                          # {cat_id: total_ns}
        cat_counts = {}                                                          # {cat_id: count}

        for benchmark in benchmarks:
            cat_id  = benchmark.category_id
            time_ns = benchmark.time_ns

            if cat_id not in cat_totals:
                cat_totals[cat_id] = 0
                cat_counts[cat_id] = 0

            cat_totals[cat_id] += time_ns
            cat_counts[cat_id] += 1

        total_ns   = sum(cat_totals.values())
        categories = List__Perf_Report__Categories()

        for cat_id in sorted(cat_totals.keys()):
            cat_ns     = cat_totals[cat_id]
            cat_count  = cat_counts[cat_id]
            pct        = (cat_ns / total_ns * 100) if total_ns > 0 else 0
            name, desc = self.extract_category_name(cat_id)

            category = Schema__Perf_Report__Category(category_id     = cat_id    ,
                                                     name            = name      ,
                                                     description     = desc      ,
                                                     total_ns        = cat_ns    ,
                                                     pct_of_total    = pct       ,
                                                     benchmark_count = cat_count )
            categories.append(category)

        return categories

    @type_safe
    def extract_category_name(self, category_id: str) -> tuple:                  # Returns (name, description)
        if self.legend and category_id in self.legend:
            legend_value = self.legend[category_id]

            if ' = ' in legend_value:                                            # Format: "Name = Description"
                parts = legend_value.split(' = ', 1)
                return parts[0].strip(), parts[1].strip() if len(parts) > 1 else ''

            return legend_value, ''

        return f'Category {category_id}', ''
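    # Worked examples (traced from extract_category_name above, with an assumed
    # legend entry {'A': 'Creation = Object creation overhead'}):
    #   'A' -> ('Creation', 'Object creation overhead')    # "Name = Description" split
    #   'Z' -> ('Category Z', '')                          # no legend entry: fallback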
    # ═══════════════════════════════════════════════════════════════════════════
    # Build Analysis
    # ═══════════════════════════════════════════════════════════════════════════

    @type_safe
    def build_analysis(self,
                       benchmarks: List__Perf_Report__Benchmarks ,
                       categories: List__Perf_Report__Categories ) -> Schema__Perf_Report__Analysis:
        bottleneck = self.find_bottleneck           (benchmarks)
        total_ns   = self.calculate_benchmarks_total(benchmarks)
        overhead   = self.calculate_overhead        (categories)
        insight    = self.generate_insight          (categories)

        return Schema__Perf_Report__Analysis(bottleneck_id  = bottleneck.benchmark_id if bottleneck else ''    ,
                                             bottleneck_ns  = bottleneck.time_ns      if bottleneck else 0     ,
                                             bottleneck_pct = bottleneck.pct_of_total if bottleneck else 0     ,
                                             total_ns       = total_ns                                         ,
                                             overhead_ns    = overhead                                         ,
                                             overhead_pct   = overhead / total_ns * 100 if total_ns > 0 else 0 ,
                                             key_insight    = insight                                          )

    @type_safe
    def find_bottleneck(self,
                        benchmarks: List__Perf_Report__Benchmarks) -> Optional[Schema__Perf_Report__Benchmark]:
        if not benchmarks:
            return None

        slowest = benchmarks[0]
        for benchmark in benchmarks:
            if benchmark.time_ns > slowest.time_ns:
                slowest = benchmark

        return slowest

    @type_safe
    def calculate_benchmarks_total(self,
                                   benchmarks: List__Perf_Report__Benchmarks) -> Safe_Int:
        total = 0
        for benchmark in benchmarks:
            total += benchmark.time_ns
        return total

    @type_safe
    def calculate_overhead(self,
                           categories: List__Perf_Report__Categories) -> Safe_Int:
        config = self.builder_config if self.builder_config else Schema__Perf_Report__Builder__Config()

        full_id    = config.full_category_id    if config.full_category_id    else None
        create_id  = config.create_category_id  if config.create_category_id  else None
        convert_id = config.convert_category_id if config.convert_category_id else None

        cat_totals = {}
        for category in categories:
            cat_totals[category.category_id] = category.total_ns

        full_total    = cat_totals.get(full_id   , 0) if full_id    else 0
        create_total  = cat_totals.get(create_id , 0) if create_id  else 0
        convert_total = cat_totals.get(convert_id, 0) if convert_id else 0

        if full_total > 0 and (create_total > 0 or convert_total > 0):
            return Safe_Int(int(full_total) - int(create_total) - int(convert_total))

        return 0

    @type_safe
    def generate_insight(self,
                         categories: List__Perf_Report__Categories) -> str:
        config = self.builder_config if self.builder_config else Schema__Perf_Report__Builder__Config()

        if config.include_auto_insight is False:
            return ''

        full_id   = config.full_category_id   if config.full_category_id   else None
        create_id = config.create_category_id if config.create_category_id else None

        if not full_id or not create_id:
            return ''

        cat_totals = {}
        cat_pcts   = {}
        for category in categories:
            cat_id             = category.category_id
            cat_totals[cat_id] = category.total_ns
            cat_pcts[cat_id]   = category.pct_of_total

        full_total   = cat_totals.get(full_id  , 0)
        create_total = cat_totals.get(create_id, 0)

        if full_total == 0:
            return ''

        create_pct = (create_total / full_total) * 100 if full_total > 0 else 0

        if create_pct < 1.0:
            return f'Category {create_id} is {create_pct:.2f}% of {full_id} → NEGLIGIBLE'
        elif create_pct < 10.0:
            return f'Category {create_id} is {create_pct:.1f}% of {full_id} → Minor impact'
        else:
            return f'Category {create_id} is {create_pct:.1f}% of {full_id} → Significant'
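    # Worked example (hypothetical category ids and timings, not from this commit):
    #   builder_config: full_category_id='F', create_category_id='C', convert_category_id='V'
    #   category totals: F=10_000ns, C=600ns, V=400ns
    #   calculate_overhead -> Safe_Int(10_000 - 600 - 400) = 9_000ns
    #   generate_insight   -> create_pct = 600 / 10_000 * 100 = 6.0
    #                         'Category C is 6.0% of F → Minor impact'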

osbot_utils/helpers/performance/report/__init__.py

Whitespace-only changes.
Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
# ═══════════════════════════════════════════════════════════════════════════════
# Perf_Report__Renderer__Base - Abstract base for report renderers
# Provides shared formatting helpers for time and percentage values
# ═══════════════════════════════════════════════════════════════════════════════

from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report import Schema__Perf_Report
from osbot_utils.type_safe.type_safe_core.decorators.type_safe import type_safe
from osbot_utils.type_safe.Type_Safe import Type_Safe


class Perf_Report__Renderer__Base(Type_Safe):                                    # Abstract base for renderers

    @type_safe
    def render(self, report: Schema__Perf_Report) -> str:                        # Render report to string
        raise NotImplementedError()

    # ═══════════════════════════════════════════════════════════════════════════
    # Formatting Helpers
    # ═══════════════════════════════════════════════════════════════════════════

    @type_safe
    def format_ns(self, ns: int) -> str:                                         # Format nanoseconds to human-readable
        if ns < 1_000:
            return f'{ns}ns'
        elif ns < 1_000_000:
            return f'{ns / 1_000:.2f}µs'
        elif ns < 1_000_000_000:
            return f'{ns / 1_000_000:.2f}ms'
        else:
            return f'{ns / 1_000_000_000:.2f}s'
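    # Worked examples (traced from format_ns above):
    #   format_ns(850)           -> '850ns'
    #   format_ns(1_500)         -> '1.50µs'
    #   format_ns(2_500_000)     -> '2.50ms'
    #   format_ns(3_200_000_000) -> '3.20s'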
    @type_safe
    def format_pct(self, pct: float, width: int = 5) -> str:                     # Format percentage with padding
        return f'{pct:>{width}.1f}%'

    @type_safe
    def format_timestamp(self, timestamp_ms: int) -> str:                        # Format timestamp to readable date
        from datetime import datetime, timezone
        dt = datetime.fromtimestamp(timestamp_ms / 1000, tz=timezone.utc)
        return dt.strftime('%Y-%m-%d %H:%M:%S')

    @type_safe
    def escape_markdown(self, text: str) -> str:                                 # Escape special markdown characters
        special_chars = ['|', '`', '*', '_', '[', ']', '<', '>']
        for char in special_chars:
            text = text.replace(char, f'\\{char}')
        return text
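render() is abstract here; concrete renderers pair it with the formatting helpers. A minimal sketch of a plain-text renderer built on this base, assuming the report fields shown in the builder above (report.benchmarks entries with benchmark_id, time_ns, pct_of_total); the class itself is hypothetical and not part of this commit:

    from osbot_utils.helpers.performance.report.renderers.Perf_Report__Renderer__Base import Perf_Report__Renderer__Base
    from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report import Schema__Perf_Report
    from osbot_utils.type_safe.type_safe_core.decorators.type_safe import type_safe


    class Perf_Report__Renderer__Text(Perf_Report__Renderer__Base):   # hypothetical plain-text renderer

        @type_safe
        def render(self, report: Schema__Perf_Report) -> str:         # one aligned line per benchmark
            lines = []
            for benchmark in report.benchmarks:
                lines.append(f'{str(benchmark.benchmark_id):<40} '
                             f'{self.format_ns(int(benchmark.time_ns)):>10} '
                             f'{self.format_pct(float(benchmark.pct_of_total))}')
            return '\n'.join(lines)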
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
# ═══════════════════════════════════════════════════════════════════════════════
# Perf_Report__Renderer__Json - JSON format renderer
# Serializes Schema__Perf_Report to JSON string
# ═══════════════════════════════════════════════════════════════════════════════

from osbot_utils.helpers.performance.report.renderers.Perf_Report__Renderer__Base import Perf_Report__Renderer__Base
from osbot_utils.helpers.performance.report.schemas.Schema__Perf_Report import Schema__Perf_Report
from osbot_utils.type_safe.type_safe_core.decorators.type_safe import type_safe
from osbot_utils.utils.Json import json_dumps


class Perf_Report__Renderer__Json(Perf_Report__Renderer__Base):                  # JSON format renderer

    @type_safe
    def render(self, report: Schema__Perf_Report) -> str:                        # Render report to JSON
        return json_dumps(report.json(), indent=2)
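End-to-end, the pieces compose as: benchmarks callable → builder → report → renderer. A usage sketch; the import path of Perf_Report__Builder is assumed from its class name (the diff omits the file path), and the body of my_benchmarks is illustrative because the registration API on Perf_Benchmark__Timing is not shown in this commit:

    from osbot_utils.helpers.performance.report.Perf_Report__Builder import Perf_Report__Builder          # path assumed
    from osbot_utils.helpers.performance.report.renderers.Perf_Report__Renderer__Json import Perf_Report__Renderer__Json

    def my_benchmarks(timing):                                        # must populate timing.results (API not shown here)
        ...

    report   = Perf_Report__Builder().run(benchmarks_fn=my_benchmarks)    # Schema__Perf_Report
    json_str = Perf_Report__Renderer__Json().render(report)               # pretty-printed JSON string
    print(json_str)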
