---
# CI workflow: builds and runs the network_benchmarks target on Linux + macOS,
# uploads JSON results, and (optionally) compares against a stored baseline.
name: Benchmarks

on:
  push:
    # 'phase-*' quoted so the glob's '*' is never parsed as a YAML alias sigil.
    branches: [ main, 'phase-*' ]
    paths:
      - 'benchmarks/**'
      - 'src/**'
      - 'include/**'
      - '.github/workflows/benchmarks.yml'
  pull_request:
    branches: [ main ]
    paths:
      - 'benchmarks/**'
      - 'src/**'
      - 'include/**'
      - '.github/workflows/benchmarks.yml'
  workflow_dispatch:
    inputs:
      save_baseline:
        description: 'Save as baseline'
        required: false
        # Kept as a string: workflow_dispatch inputs are strings and the job
        # compares against 'true' textually.
        default: 'false'
jobs:
  benchmark:
    name: Run Performance Benchmarks
    runs-on: ${{ matrix.os }}
    timeout-minutes: 60
    strategy:
      # Let each OS finish even if the other fails, so partial results upload.
      fail-fast: false
      matrix:
        os: [ubuntu-24.04, macos-15]
        compiler: [clang]
        build_type: [Release]

    steps:
      # NOTE(review): checkout@v6 is ahead of the currently published major
      # (v4/v5) — confirm this tag exists or pin to a released major.
      - name: Checkout code
        uses: actions/checkout@v6
        with:
          fetch-depth: 0  # Full history for comparison

      - name: Checkout common_system
        uses: actions/checkout@v6
        with:
          repository: kcenon/common_system
          path: common_system
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Install dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          sudo apt-get install -y cmake ninja-build clang libbenchmark-dev libgtest-dev libfmt-dev libasio-dev

      - name: Install dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          brew install ninja google-benchmark googletest fmt asio

      - name: Set up compiler
        run: |
          echo "CC=clang" >> $GITHUB_ENV
          echo "CXX=clang++" >> $GITHUB_ENV

      # Build and locally install the sibling common_system project so the
      # main configure step can find it via CMAKE_PREFIX_PATH.
      - name: Build common_system dependency
        run: |
          cd common_system
          cmake -B build -G Ninja \
            -DCMAKE_BUILD_TYPE=Release \
            -DBUILD_TESTS=OFF \
            -DBUILD_EXAMPLES=OFF \
            -DBUILD_SAMPLES=OFF
          cmake --build build
          cmake --install build --prefix ${{ github.workspace }}/deps/install

      - name: Configure CMake
        run: |
          cmake -B build -S . \
            -GNinja \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DCMAKE_PREFIX_PATH=${{ github.workspace }}/deps/install \
            -DNETWORK_BUILD_BENCHMARKS=ON \
            -DBUILD_WITH_COMMON_SYSTEM=ON \
            -DBUILD_WITH_LOGGER_SYSTEM=OFF \
            -DBUILD_WITH_THREAD_SYSTEM=OFF \
            -DBUILD_WITH_CONTAINER_SYSTEM=OFF \
            -DBUILD_MESSAGING_BRIDGE=OFF \
            -DBUILD_TESTS=OFF \
            -DBUILD_SAMPLES=OFF

      - name: Build benchmarks
        run: cmake --build build --config ${{ matrix.build_type }} --target network_benchmarks -j

      - name: Run benchmarks
        timeout-minutes: 45
        run: |
          cd build/benchmarks
          # Exclude real I/O benchmarks in CI due to network timing and
          # thread lifecycle issues (hangs on Ubuntu CI runners).
          # Excluded categories:
          #   HTTP_ - HTTP client benchmarks (network timing)
          #   TcpLoopbackFixture - TCP echo server loopback (detached threads)
          #   DirectAPI_Send/FacadeAPI_Send - real server+client throughput
          #   BurstThroughput - burst send validation with real connections
          #   FullLifecycle - full connect/send/disconnect cycle
          #   Facade(Client|Server)_Create - spawns IO threads
          # These can be run manually for performance analysis.
          REPS=${{ github.event.inputs.save_baseline == 'true' && '3' || '1' }}
          EXCLUDE="HTTP_|TcpLoopbackFixture|DirectAPI_Send|FacadeAPI_Send"
          EXCLUDE="${EXCLUDE}|BurstThroughput|FullLifecycle"
          EXCLUDE="${EXCLUDE}|FacadeClient_Create|FacadeServer_Create"
          # Leading '-' in --benchmark_filter negates the regex (Google
          # Benchmark convention): run everything EXCEPT the excluded set.
          ./network_benchmarks \
            --benchmark_format=json \
            --benchmark_out=benchmark_results_${{ matrix.os }}.json \
            --benchmark_repetitions=$REPS \
            --benchmark_report_aggregates_only=true \
            --benchmark_filter="-${EXCLUDE}"

      # Quick human-readable spot check of the cheap creation benchmarks.
      - name: Run benchmarks (console output)
        timeout-minutes: 10
        run: |
          cd build/benchmarks
          ./network_benchmarks \
            --benchmark_filter="Message_Create|Connection_Create|Session_Create" \
            --benchmark_repetitions=1

      # NOTE(review): upload-artifact@v7 is ahead of the currently published
      # major (v4) — confirm this tag exists or pin to a released major.
      - name: Upload benchmark results
        uses: actions/upload-artifact@v7
        continue-on-error: true
        with:
          name: benchmark-results-${{ matrix.os }}
          path: build/benchmarks/benchmark_results_${{ matrix.os }}.json
          retention-days: 30

      # NOTE(review): this copies the baseline into the ephemeral runner
      # workspace only — nothing commits or uploads it, so the file is lost
      # when the job ends. Confirm whether a commit/artifact upload step is
      # intended to follow.
      - name: Save baseline (if requested)
        if: github.event.inputs.save_baseline == 'true' && github.ref == 'refs/heads/main'
        run: |
          mkdir -p benchmarks/baselines
          cp build/benchmarks/benchmark_results_${{ matrix.os }}.json \
            benchmarks/baselines/baseline_${{ matrix.os }}_$(date +%Y%m%d).json

      - name: Compare with baseline (if exists)
        run: |
          # Check if any baseline files exist for this OS
          BASELINE=$(ls -t benchmarks/baselines/baseline_${{ matrix.os }}_*.json 2>/dev/null | head -1 || true)
          if [ -z "$BASELINE" ] || [ ! -f "$BASELINE" ]; then
            echo "No baseline found for ${{ matrix.os }}, skipping comparison"
            exit 0
          fi
          echo "Comparing with baseline..."
          # Create virtual environment for Python dependencies (PEP 668 compliance)
          python3 -m venv .venv
          source .venv/bin/activate
          pip install scipy
          echo "Baseline: $BASELINE"
          echo "Current: build/benchmarks/benchmark_results_${{ matrix.os }}.json"
          # Download compare.py; -f makes curl fail (non-zero exit) on an HTTP
          # error instead of silently saving the error page as compare.py.
          curl -fsSL -o compare.py https://raw.githubusercontent.com/google/benchmark/main/tools/compare.py
          chmod +x compare.py
          # Run comparison
          python3 compare.py \
            "$BASELINE" \
            build/benchmarks/benchmark_results_${{ matrix.os }}.json
          deactivate

      - name: Check for performance regression
        run: |
          echo "Performance regression detection: TBD"
          echo "Will be implemented in Phase 1 with baseline establishment"

  report:
    name: Generate Benchmark Report
    needs: benchmark
    runs-on: ubuntu-24.04
    # Run even when the benchmark matrix partially failed, so we still
    # summarize whatever results were produced.
    if: always()

    steps:
      # NOTE(review): download-artifact@v8 is ahead of the currently published
      # major (v4/v5) — confirm this tag exists or pin to a released major.
      - name: Download all artifacts
        uses: actions/download-artifact@v8

      - name: Generate summary
        run: |
          echo "# Benchmark Results Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "## Phase 0: Baseline Measurement" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          for dir in benchmark-results-*; do
            if [ -d "$dir" ]; then
              echo "### $dir" >> $GITHUB_STEP_SUMMARY
              # Use ls with an unquoted glob instead of [ -f glob ]: the test
              # builtin errors ("binary operator expected") when the glob
              # expands to more than one file, and treats an unmatched glob
              # as a literal path.
              if ls "$dir"/benchmark_results_*.json >/dev/null 2>&1; then
                echo "✅ Benchmarks completed" >> $GITHUB_STEP_SUMMARY
                echo "" >> $GITHUB_STEP_SUMMARY
              else
                echo "❌ No results found" >> $GITHUB_STEP_SUMMARY
                echo "" >> $GITHUB_STEP_SUMMARY
              fi
            fi
          done
          echo "## Next Steps" >> $GITHUB_STEP_SUMMARY
          echo "- Review benchmark results" >> $GITHUB_STEP_SUMMARY
          echo "- Document baseline in docs/BASELINE.md" >> $GITHUB_STEP_SUMMARY
          echo "- Set performance targets for Phase 1" >> $GITHUB_STEP_SUMMARY