Skip to content

Commit 3990316

Browse files
Add CI workflows for sanitizers, coverage, and perf benchmark gate
1 parent 6230cbf commit 3990316

File tree

8 files changed

+375
-0
lines changed

8 files changed

+375
-0
lines changed

.github/workflows/coverage.yml

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
name: Coverage

on:
  push:
    branches: [main]
  pull_request:

jobs:
  coverage:
    runs-on: ubuntu-latest
    env:
      CTEST_OUTPUT_ON_FAILURE: 1
      # Each test process writes its own raw profile (%p expands to the PID).
      LLVM_PROFILE_FILE: build/profiles/%p.profraw
    steps:
      - uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y clang llvm ninja-build

      - name: Configure
        run: |
          cmake -S . -B build -G Ninja \
            -DBUILD_TESTING=ON \
            -DTSD_ENABLE_COVERAGE=ON \
            -DCMAKE_BUILD_TYPE=Debug \
            -DCMAKE_C_COMPILER=clang \
            -DCMAKE_CXX_COMPILER=clang++

      - name: Build
        run: cmake --build build --config Debug

      - name: Run tests
        run: |
          mkdir -p build/profiles
          ctest --test-dir build --output-on-failure

      - name: Generate coverage report
        run: |
          shopt -s nullglob
          profiles=(build/profiles/*.profraw)
          if [ ${#profiles[@]} -eq 0 ]; then
            echo "No coverage profiles were generated" >&2
            exit 1
          fi
          llvm-profdata merge -sparse "${profiles[@]}" -o build/coverage.profdata
          # llvm-cov treats only the FIRST positional argument as a binary;
          # later positionals are source-path filters. Additional binaries
          # must be passed with -object or their coverage is silently dropped.
          llvm-cov export \
            --format=lcov \
            --instr-profile=build/coverage.profdata \
            build/test_config_parser \
            -object build/test_statistics \
            -object build/test_thermal_simd > build/lcov.info
          llvm-cov report \
            --instr-profile=build/coverage.profdata \
            build/test_config_parser \
            -object build/test_statistics \
            -object build/test_thermal_simd > build/coverage.txt

      - name: Upload coverage artifacts
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: |
            build/coverage.txt
            build/lcov.info

.github/workflows/perf-gate.yml

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
name: perf-gate

on:
  workflow_call:
    inputs:
      baseline:
        description: Path to the baseline file
        required: false
        default: tests/perf_smoke.baseline
        type: string
      threshold:
        description: Maximum allowed regression ratio
        required: false
        default: 0.6
        type: number

jobs:
  perf:
    runs-on: ubuntu-latest
    env:
      CTEST_OUTPUT_ON_FAILURE: 1
    steps:
      - uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y ninja-build

      - name: Configure
        run: |
          cmake -S . -B build -G Ninja \
            -DBUILD_TESTING=ON \
            -DCMAKE_BUILD_TYPE=RelWithDebInfo

      - name: Build perf smoke benchmark
        run: cmake --build build --target perf_smoke --config RelWithDebInfo

      - name: Run perf smoke gate
        # Route caller-supplied inputs through env instead of interpolating
        # ${{ }} directly into the script, so values from the calling
        # workflow cannot inject shell syntax into this run step.
        env:
          PERF_BASELINE: ${{ inputs.baseline }}
          PERF_THRESHOLD: ${{ inputs.threshold }}
        run: |
          python3 tools/perf_gate.py \
            --baseline "${PERF_BASELINE}" \
            --threshold "${PERF_THRESHOLD}" \
            -- ./build/perf_smoke

.github/workflows/perf.yml

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
# Thin caller workflow: runs the reusable perf-gate workflow on pushes to
# main and on pull requests.
name: Perf Regression Gate

on:
  push:
    branches: [main]
  pull_request:

jobs:
  perf-smoke:
    # Reusable workflow defined in this repository.
    uses: ./.github/workflows/perf-gate.yml
    with:
      # File holding the expected metric for the perf_smoke benchmark.
      baseline: tests/perf_smoke.baseline
      # Maximum allowed regression ratio (0.6 = 60%).
      threshold: 0.6

.github/workflows/sanitizers.yml

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
# Builds and runs the test suite under each sanitizer with both GCC and
# Clang (a 2x3 matrix) on pushes to main and on pull requests.
name: Sanitizer Builds

on:
  push:
    branches: [main]
  pull_request:

jobs:
  sanitizers:
    name: ${{ matrix.compiler }}-${{ matrix.sanitizer }}
    runs-on: ubuntu-latest
    strategy:
      # Let the remaining matrix legs finish even if one sanitizer fails.
      fail-fast: false
      matrix:
        compiler: [gcc, clang]
        sanitizer: [asan, ubsan, tsan]
    env:
      CTEST_OUTPUT_ON_FAILURE: 1
    steps:
      - uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y ninja-build

      - name: Configure
        run: |
          # Map the matrix C compiler to its matching C++ driver.
          if [ "${{ matrix.compiler }}" = "gcc" ]; then
            cxx_compiler=g++
          else
            cxx_compiler=clang++
          fi
          cmake -S . -B build -G Ninja \
            -DBUILD_TESTING=ON \
            -DCMAKE_BUILD_TYPE=RelWithDebInfo \
            -DTSD_SANITIZER=${{ matrix.sanitizer }} \
            -DCMAKE_C_COMPILER=${{ matrix.compiler }} \
            -DCMAKE_CXX_COMPILER=${cxx_compiler}

      - name: Build
        run: cmake --build build --config RelWithDebInfo

      - name: Run tests
        run: ctest --test-dir build --output-on-failure

CMakeLists.txt

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,47 @@ set(THERMAL_SIMD_DISPATCHER_CPU_FLAGS "-msse4.1" CACHE STRING "CPU-specific comp
66

77
include(CTest)
88

9+
# Build-time instrumentation knobs consumed by the CI workflows.
option(TSD_ENABLE_COVERAGE "Enable coverage instrumentation" OFF)
set(TSD_SANITIZER "" CACHE STRING "Sanitizer to enable (asan, ubsan, tsan)")

if(TSD_ENABLE_COVERAGE)
  # Source-based coverage flags; the coverage workflow pins clang/clang++,
  # and the flags must be present at both compile and link time.
  add_compile_options(-fprofile-instr-generate -fcoverage-mapping)
  add_link_options(-fprofile-instr-generate -fcoverage-mapping)
endif()

if(TSD_SANITIZER)
  string(TOLOWER "${TSD_SANITIZER}" _tsd_sanitizer_kind)
  # Accept the short CI spellings (asan/ubsan/tsan) as well as the canonical
  # -fsanitize= names (address/undefined/thread).
  if(_tsd_sanitizer_kind STREQUAL "asan")
    set(_tsd_sanitizer_flag "address")
  elseif(_tsd_sanitizer_kind STREQUAL "ubsan")
    set(_tsd_sanitizer_flag "undefined")
  elseif(_tsd_sanitizer_kind STREQUAL "tsan")
    set(_tsd_sanitizer_flag "thread")
  elseif(_tsd_sanitizer_kind STREQUAL "address" OR _tsd_sanitizer_kind STREQUAL "undefined" OR _tsd_sanitizer_kind STREQUAL "thread")
    set(_tsd_sanitizer_flag "${_tsd_sanitizer_kind}")
  else()
    message(FATAL_ERROR "Unsupported sanitizer '${TSD_SANITIZER}'. Use asan, ubsan or tsan.")
  endif()

  set(_tsd_sanitizer_flags "-fsanitize=${_tsd_sanitizer_flag}")
  # Frame pointers keep sanitizer stack traces usable.
  add_compile_options(${_tsd_sanitizer_flags} -fno-omit-frame-pointer)
  add_link_options(${_tsd_sanitizer_flags})

  if(_tsd_sanitizer_flag STREQUAL "undefined")
    # Make UBSan findings fatal so CI fails instead of merely logging them.
    add_compile_options(-fno-sanitize-recover=undefined)
    add_link_options(-fno-sanitize-recover=undefined)
  endif()

  if(_tsd_sanitizer_flag STREQUAL "address")
    # -shared-libasan is a Clang-only driver option; GCC rejects it at link
    # time, which broke the gcc/asan leg of the sanitizer matrix. Add it
    # only when compiling with Clang.
    if(CMAKE_C_COMPILER_ID MATCHES "Clang")
      add_link_options(-shared-libasan)
    endif()
  endif()

  if(_tsd_sanitizer_flag STREQUAL "thread")
    # TSan wants position-independent executables.
    add_compile_options(-fPIE)
    add_link_options(-pie)
  endif()
endif()
49+
950
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/include/thermal/simd)
1051
configure_file(
1152
${CMAKE_CURRENT_SOURCE_DIR}/include/thermal/simd/version.h.in
@@ -66,6 +107,11 @@ if(BUILD_TESTING)
66107
target_compile_options(test_thermal_simd PRIVATE -Wall -Wextra -O1 -pthread -fPIC)
67108
target_include_directories(test_thermal_simd PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
68109
add_test(NAME thermal_simd COMMAND test_thermal_simd)
110+
111+
# Micro-benchmark used by the perf-gate workflow; it prints
# PERF_SMOKE_PER_EVAL_US for tools/perf_gate.py to compare with the baseline.
add_executable(perf_smoke tests/perf_smoke.c)
target_link_libraries(perf_smoke PRIVATE thermal_simd_core_tests pthread m)
# TSD_ENABLE_TESTS exposes the fake-perf test hooks the benchmark relies on.
target_compile_definitions(perf_smoke PRIVATE TSD_ENABLE_TESTS)
target_compile_options(perf_smoke PRIVATE -Wall -Wextra -Wpedantic)
69115
endif()
70116

71117
install(TARGETS thermal_simd_core

tests/perf_smoke.baseline

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
0.150

tests/perf_smoke.c

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
#include <math.h>
2+
#include <stdatomic.h>
3+
#include <stdint.h>
4+
#include <stdio.h>
5+
#include <stdlib.h>
6+
#include <time.h>
7+
8+
#include <thermal/simd/thermal_config.h>
9+
#include <thermal/simd/thermal_perf.h>
10+
11+
#define WORKLOAD_ITERS 1024
12+
#define WARMUP_ITERATIONS 8
13+
#define MEASURED_ITERATIONS 64
14+
15+
static void smoke_workload(void) {
16+
for (int i = 0; i < WORKLOAD_ITERS; ++i) {
17+
atomic_fetch_add_explicit(&g_tsd_workload_iterations, (uint64_t)1, memory_order_relaxed);
18+
}
19+
}
20+
21+
int main(void) {
22+
if (setenv("TSD_FAKE_PERF", "1", 1) != 0) {
23+
perror("setenv");
24+
return 1;
25+
}
26+
27+
tsd_runtime_config cfg;
28+
tsd_runtime_config_set_defaults(&cfg);
29+
cfg.work_iters = WORKLOAD_ITERS;
30+
tsd_runtime_config_refresh_ticks(&cfg);
31+
32+
const uint32_t fake_ratios[] = {1000, 950, 925, 900, 875, 850};
33+
tsd_perf_set_fake_script(fake_ratios, sizeof(fake_ratios) / sizeof(fake_ratios[0]), 1200);
34+
35+
perf_ctx_t *ctx = tsd_perf_init(smoke_workload);
36+
if (!ctx) {
37+
fprintf(stderr, "failed to initialise perf context\n");
38+
return 1;
39+
}
40+
41+
tsd_perf_measure_baseline(ctx, &cfg);
42+
43+
tsd_thermal_eval_t eval = {0};
44+
for (int i = 0; i < WARMUP_ITERATIONS; ++i) {
45+
(void)tsd_perf_evaluate(ctx, &eval, &cfg);
46+
}
47+
48+
struct timespec start = {0}, end = {0};
49+
clock_gettime(CLOCK_MONOTONIC, &start);
50+
for (int i = 0; i < MEASURED_ITERATIONS; ++i) {
51+
(void)tsd_perf_evaluate(ctx, &eval, &cfg);
52+
}
53+
clock_gettime(CLOCK_MONOTONIC, &end);
54+
55+
const double start_us = (double)start.tv_sec * 1e6 + (double)start.tv_nsec / 1e3;
56+
const double end_us = (double)end.tv_sec * 1e6 + (double)end.tv_nsec / 1e3;
57+
const double elapsed = fmax(end_us - start_us, 1.0);
58+
const double per_eval = elapsed / (double)MEASURED_ITERATIONS;
59+
60+
tsd_perf_clear_fake_script();
61+
tsd_perf_cleanup(ctx);
62+
63+
printf("PERF_SMOKE_PER_EVAL_US=%.3f\n", per_eval);
64+
printf("PERF_SMOKE_ITERATIONS=%d\n", MEASURED_ITERATIONS);
65+
66+
return 0;
67+
}

tools/perf_gate.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
#!/usr/bin/env python3
2+
"""Simple performance gate for the perf_smoke benchmark."""
3+
4+
from __future__ import annotations
5+
6+
import argparse
7+
import pathlib
8+
import subprocess
9+
import sys
10+
from typing import List
11+
12+
13+
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse CLI options; everything after ``--`` is the benchmark command."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--baseline",
        required=True,
        type=pathlib.Path,
        help="Path to the baseline file containing the expected microseconds per evaluation.",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.20,
        help="Maximum allowed relative regression (e.g. 0.15 for 15%%).",
    )
    parser.add_argument(
        "command",
        nargs=argparse.REMAINDER,
        help="Command to run (prefix with -- to terminate the argument parser).",
    )
    namespace = parser.parse_args(argv)
    # REMAINDER keeps the leading "--" separator; drop it before validating.
    command = namespace.command
    if command and command[0] == "--":
        command = command[1:]
    if not command:
        parser.error("a benchmark command must be provided after --")
    namespace.command = command
    return namespace
40+
41+
42+
def read_baseline(path: pathlib.Path) -> float:
    """Load the expected microseconds-per-eval figure from *path*.

    Exits via ``SystemExit`` with a diagnostic if the file is unreadable or
    does not contain a single float.
    """
    try:
        raw = path.read_text(encoding="utf-8")
    except OSError as exc:
        raise SystemExit(f"failed to read baseline '{path}': {exc}") from exc
    try:
        return float(raw.strip())
    except ValueError as exc:
        raise SystemExit(f"baseline '{path}' does not contain a valid float") from exc
51+
52+
53+
def extract_metric(stdout: str) -> float:
    """Pull the PERF_SMOKE_PER_EVAL_US value out of the benchmark output."""
    prefix = "PERF_SMOKE_PER_EVAL_US="
    for line in stdout.splitlines():
        if not line.startswith(prefix):
            continue
        try:
            return float(line[len(prefix):])
        except ValueError as exc:
            raise SystemExit("failed to parse benchmark output") from exc
    raise SystemExit("benchmark output did not contain PERF_SMOKE_PER_EVAL_US")
61+
62+
63+
def main(argv: List[str]) -> int:
    """Run the benchmark command and gate on relative regression vs baseline.

    Returns the benchmark's exit code when it fails, 1 when the measured
    regression exceeds the threshold, and 0 otherwise. Raises ``SystemExit``
    for an unreadable/invalid/non-positive baseline.
    """
    args = parse_args(argv)
    baseline = read_baseline(args.baseline)
    # Validate the baseline BEFORE spending time running the benchmark;
    # previously a non-positive baseline was only rejected after the run.
    if baseline <= 0:
        raise SystemExit("baseline value must be positive")

    result = subprocess.run(args.command, capture_output=True, text=True)
    # Mirror the benchmark's own output so CI logs stay informative.
    if result.stdout:
        print(result.stdout, end="")
    if result.stderr:
        print(result.stderr, end="", file=sys.stderr)
    if result.returncode != 0:
        return result.returncode

    measured = extract_metric(result.stdout)
    regression = (measured - baseline) / baseline
    print(f"Recorded perf_smoke: {measured:.3f} us (baseline {baseline:.3f} us)")
    print(f"Relative change: {regression * 100.0:.2f}% (threshold {args.threshold * 100.0:.2f}%)")

    if regression > args.threshold:
        print(
            f"Regression of {regression * 100.0:.2f}% exceeds allowed threshold",
            file=sys.stderr,
        )
        return 1
    return 0
90+
91+
92+
if __name__ == "__main__":
93+
sys.exit(main(sys.argv[1:]))

0 commit comments

Comments
 (0)