Skip to content

Commit 6a7f392

Browse files
committed
feat: use GitHub Actions to generate and deploy benchmark graphs
2 parents d8e7986 + cba6242 commit 6a7f392

File tree

5 files changed

+206
-15
lines changed

5 files changed

+206
-15
lines changed

.github/workflows/ci.yml

Lines changed: 81 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,22 @@
1-
name: CI Build and Test
1+
name: CI Build and Test
2+
23

3-
# Run on pushes and pull requests targeting the main branch
44
on:
5-
push:
6-
branches: [ "main" ]
7-
pull_request:
8-
branches: [ "main" ]
5+
# push:
6+
# branches: "benchmark" #[ "main" ]
7+
workflow_dispatch:
98

109
jobs:
1110
build_and_test:
12-
# Use a build matrix to test on Linux and Windows
11+
# Using build matrix to test on Linux and Windows
1312
strategy:
1413
matrix:
15-
os: [ubuntu-latest, windows-latest]
14+
os: [ubuntu-latest] #, windows-latest]
1615

1716
runs-on: ${{ matrix.os }}
17+
permissions:
18+
deployments: write
19+
contents: write
1820

1921
steps:
2022
- name: Checkout Code
@@ -25,9 +27,78 @@ jobs:
2527
run: cmake -B ${{github.workspace}}/build
2628

2729
- name: Build Project
28-
# The '--config Release' is important, especially on Windows where CMake defaults to Debug
30+
2931
run: cmake --build ${{github.workspace}}/build --config Release
3032

3133
- name: Run Unit Tests via CTest
32-
run: ctest --output-on-failure --verbose
34+
run: |
35+
if [ "${{ runner.os }}" == "Windows" ]; then
36+
ctest -C Release --output-on-failure --verbose
37+
else
38+
ctest --output-on-failure --verbose
39+
fi
40+
shell: bash
3341
working-directory: build
42+
43+
- name: Run Benchmarks and Save Results
44+
working-directory: ${{github.workspace}}/build/bin
45+
run: |
46+
if [ "${{ runner.os }}" == "Windows" ]; then
47+
./Release/benchmark-app.exe --benchmark_format=json --benchmark_out=benchmark_result.json
48+
else
49+
./benchmark-app --benchmark_format=json --benchmark_out=benchmark_result.json
50+
fi
51+
shell: bash
52+
53+
54+
- name: Setup Python and Install Dependencies
55+
uses: actions/setup-python@v5
56+
with:
57+
python-version: '3.x'
58+
59+
- name: Install Python Dependencies
60+
run: pip install -r scripts/requirements.txt  # file lives under scripts/, not the repo root
61+
62+
63+
- name: Generate Graphs and Prepare for Deployment
64+
run: |
65+
# Define the temporary folder for all deployed content
66+
OUTPUT_DIR="gh-pages-output"
67+
68+
# Create the output directory structure
69+
mkdir -p $OUTPUT_DIR/data
70+
71+
# Execute the Python script
72+
# Arguments: [Input JSON Path] [Output Directory Path]
73+
python scripts/plot_benchmark.py ./build/bin/benchmark_result.json $OUTPUT_DIR
74+
75+
# Copy the raw JSON to the deployment folder for public access
76+
cp ./build/bin/benchmark_result.json $OUTPUT_DIR/data/benchmark_result.json
77+
echo "Deployment folder contents prepared in: $OUTPUT_DIR"
78+
79+
# --- 3. Artifact Upload (For Direct Download) ---
80+
- name: ⬆️ Upload Job Artifacts (Graphs & JSON)
81+
uses: actions/upload-artifact@v4
82+
with:
83+
name: allocator-benchmark-results # Name of the downloadable ZIP file
84+
path: gh-pages-output/ # Path to the folder to zip and upload
85+
retention-days: 7 # Keep for one week
86+
87+
# --- 4. GitHub Pages Deployment (For Public Hosting) ---
88+
- name: Configure Pages Environment
89+
uses: actions/configure-pages@v5 # Sets up necessary environment variables
90+
91+
- name: Upload Pages Artifact
92+
uses: actions/upload-pages-artifact@v3
93+
with:
94+
path: gh-pages-output/ # Uploads the folder contents for Pages deployment
95+
96+
- name: Deploy to GitHub Pages
97+
id: deployment
98+
uses: actions/deploy-pages@v4 # Triggers the final site update
99+
100+
- name: Report Deployment URL
101+
run: |
102+
echo "Graphs are live at: ${{ steps.deployment.outputs.page_url }}"
103+
# ONLY RUNS ON UBUNTU: Prevents redundant pushes from the Windows job
104+
# if: success() && matrix.os == 'ubuntu-latest'

CMakeLists.txt

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,8 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
1111

1212
set(CMAKE_CXX_EXTENSIONS OFF)
1313

14+
include(FetchContent)
15+
1416
# Set a common output directory for executables and libraries
1517
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
1618
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
@@ -20,9 +22,9 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
2022
add_library(tlsf-allocator STATIC
2123

2224
src/tlsf.cpp
23-
2425
)
2526

27+
2628
add_library(tlsf::tlsf-allocator ALIAS tlsf-allocator)
2729

2830
target_include_directories(tlsf-allocator PUBLIC
@@ -66,7 +68,6 @@ endif()
6668
google-benchmark
6769
GIT_REPOSITORY https://github.com/google/benchmark.git
6870
GIT_TAG v1.9.4 # specific version for stability
69-
7071
)
7172

7273

@@ -93,4 +94,4 @@ add_executable(benchmark-app
9394
benchmark::benchmark
9495
)
9596

96-
endif()
97+
endif()

benchmark/benchmark.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,12 +198,12 @@ int main(int argc, char** argv)
198198

199199
bool success = benchmark::RunSpecifiedBenchmarks();
200200

201-
std::cout << "\n\n--- Benchmarks Finished ---\nPress ENTER to exit...";
201+
std::cout << "\n\n--- Benchmarks Finished ---\n\n";
202202

203203

204204

205205
// Wait for the user to press ENTER
206-
std::cin.get();
206+
// std::cin.get();
207207

208208
return success ? 0 : 1;
209209
}

scripts/plot_benchmark.py

Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
# --- Configuration ---
#
# Usage: python plot_benchmark.py <input_json_path> <output_dir>
# Reads a Google Benchmark JSON results file and writes SVG graphs into
# <output_dir>.

import json
import os
import sys  # BUG FIX: sys.argv / sys.exit were used below without importing sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Validate command-line arguments before doing any work.
if len(sys.argv) < 3:
    print("Usage: python plot_benchmark.py <input_json_path> <output_dir>")
    sys.exit(1)

INPUT_FILE = sys.argv[1]   # path to the benchmark JSON produced by benchmark-app
OUTPUT_DIR = sys.argv[2]   # directory that will receive the generated SVGs

# Ensure the output directory exists
os.makedirs(OUTPUT_DIR, exist_ok=True)
21+
# --- 1. Data Processing Function ---
def process_benchmark_data(file_path):
    """Loads benchmark JSON, calculates Average Time per Operation, and returns a DataFrame."""
    with open(file_path, 'r') as f:
        raw = json.load(f)

    rows = []
    for entry in raw.get('benchmarks', []):
        bench_name = entry['name']
        total_cpu_ns = entry['cpu_time']
        iterations = entry['iterations']  # read for input validation; not plotted

        # Map the benchmark fixture name to a human-readable allocator label;
        # anything else is not relevant to the comparison and is skipped.
        if 'SystemMalloc' in bench_name:
            allocator_label = 'System Malloc'
        elif 'MyTLSFAllocatorFixture' in bench_name:
            allocator_label = 'TLSF Allocator'
        else:
            continue

        # Only the mixed-size alloc/dealloc workload is of interest here.
        if 'AllocDeallocCycle_MixedSize' not in bench_name:
            continue

        # The trailing path component encodes N (e.g. '.../100').
        try:
            op_count = int(bench_name.rsplit('/', 1)[-1])
        except ValueError:
            op_count = 1  # fallback when no numeric suffix is present

        rows.append({
            'name': bench_name,
            'allocator': allocator_label,
            'workload': 'AllocDeallocCycle_MixedSize',
            'num_ops': op_count,
            'cpu_time_ns': total_cpu_ns,
            # cpu_time covers all N operations of one iteration;
            # divide by N to get per-operation cost.
            'avg_time_ns': total_cpu_ns / op_count,
        })

    return pd.DataFrame(rows)
65+
66+
# --- 2. Plotting Functions ---
def plot_performance(df_AllocDeallocCycle_MixedSize):
    """Generates a line plot showing My TLSF's performance vs. Malloc's.

    Args:
        df_AllocDeallocCycle_MixedSize: DataFrame with 'allocator',
            'num_ops' and 'avg_time_ns' columns for the mixed-size workload.

    Writes 'Performance_line_plot.svg' into OUTPUT_DIR (module-level global).
    """

    # Sort for correct line plotting
    df_mixed_sorted = df_AllocDeallocCycle_MixedSize.sort_values(by='num_ops')

    plt.figure(figsize=(9, 6))

    # One line per allocator
    for name, group in df_mixed_sorted.groupby('allocator'):
        plt.plot(group['num_ops'], group['avg_time_ns'],
                 marker='o', linestyle='-', label=name,
                 linewidth=2.5)

    plt.title('Performance Graph', fontsize=14)
    plt.ylabel('Average Time per N Allocation-Deallocation of Mixed Sizes (nanoseconds)', fontsize=12)
    plt.xlabel('N Allocations per iteration ', fontsize=12)
    # BUG FIX: the original used the leaked loop variable `group` here, so only
    # the LAST allocator's x-values became ticks (and it raised NameError on an
    # empty frame). Use the full sorted set of N values instead.
    plt.xticks(sorted(df_mixed_sorted['num_ops'].unique()))
    plt.legend(title='Allocator')
    plt.grid(linestyle='--', alpha=0.7)

    plt.savefig(os.path.join(OUTPUT_DIR, 'Performance_line_plot.svg'), format='svg', bbox_inches='tight')
    print(f"Generated {os.path.join(OUTPUT_DIR, 'Performance_line_plot.svg')}")
    plt.close()
92+
# --- 3. Main Execution ---
if __name__ == '__main__':
    try:
        # Load and process the data
        df = process_benchmark_data(INPUT_FILE)
        print("Data loaded successfully.")
        print(f"Total benchmarks processed: {len(df)}")

        # Filter for the mixed workload (fragmentation stress)
        df_AllocDeallocCycle_MixedSize = df[df['workload'] == 'AllocDeallocCycle_MixedSize'].copy()

        if df_AllocDeallocCycle_MixedSize.empty:
            print("Error: Could not find 'AllocDeallocCycle_MixedSize' data. Check JSON keys.")
            # BUG FIX: exit non-zero so CI fails instead of silently "succeeding"
            raise SystemExit(1)

        # time taken across various allocations
        plot_performance(df_AllocDeallocCycle_MixedSize)

        # BUG FIX: the original message named a nonexistent 'output_graphs'
        # directory; report the actual output location.
        print(f"\nPlotting complete. SVGs saved to '{OUTPUT_DIR}'.")

    except FileNotFoundError:
        print(f"Error: Input file '{INPUT_FILE}' not found. Place your benchmark JSON here.")
        raise SystemExit(1)
    except SystemExit:
        raise  # propagate intentional non-zero exits untouched
    except Exception as e:
        # BUG FIX: the original swallowed all exceptions and exited 0,
        # so a failed graph run still looked green in CI.
        print(f"An unexpected error occurred: {e}")
        raise SystemExit(1)

scripts/requirements.txt

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
# requirements.txt
2+
matplotlib
3+
pandas
4+
numpy
5+
# NOTE: 'json' removed — it is part of the Python standard library and is not pip-installable; `pip install -r` fails on it.

0 commit comments

Comments
 (0)