chore(deps): verify libmariadb LGPL-2.1 dynamic linking compliance (#… #367
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Benchmark workflow: builds and runs database_system performance benchmarks on
# Linux and macOS, compares results against a saved baseline, and posts a
# summary/PR comment. Requires Clang 18+ for full C++20 std::format support.
name: Benchmarks

on:
  push:
    branches: [ main, phase-* ]
    paths:
      - 'benchmarks/**'
      - 'database/**'
      - '.github/workflows/benchmarks.yml'
  pull_request:
    branches: [ main ]
    paths:
      - 'benchmarks/**'
      - 'database/**'
  workflow_dispatch:
    inputs:
      save_baseline:
        description: 'Save as baseline'
        required: false
        default: 'false'

permissions:
  contents: read
  pull-requests: write
  issues: write

jobs:
  benchmark:
    name: Run Performance Benchmarks
    runs-on: ${{ matrix.os }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        # Ubuntu 24.04 and macOS 14 for full C++20 std::format support
        os: [ubuntu-24.04, macos-14]
        compiler: [clang]
        build_type: [Release]
    steps:
      - name: Checkout database_system
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Full history for comparison

      - name: Checkout common_system
        uses: actions/checkout@v4
        with:
          repository: kcenon/common_system
          path: common_system
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Install dependencies (Ubuntu)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get update
          # Use Clang 18 for full C++20 std::format support
          sudo apt-get install -y cmake ninja-build clang-18 libbenchmark-dev libgtest-dev libfmt-dev libpq-dev

      - name: Install dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          brew install ninja google-benchmark googletest fmt postgresql

      - name: Set up compiler
        run: |
          if [ "$RUNNER_OS" == "Linux" ]; then
            # Use Clang 18 for full C++20 std::format support
            echo "CC=clang-18" >> $GITHUB_ENV
            echo "CXX=clang++-18" >> $GITHUB_ENV
          else
            echo "CC=clang" >> $GITHUB_ENV
            echo "CXX=clang++" >> $GITHUB_ENV
          fi

      - name: Build and install common_system
        shell: bash
        run: |
          cd common_system
          cmake -B build -S . -DCMAKE_BUILD_TYPE=Release -DUSE_UNIT_TEST=OFF \
            -DCMAKE_C_COMPILER=$CC -DCMAKE_CXX_COMPILER=$CXX
          cmake --build build --config Release
          if [ "$RUNNER_OS" == "Windows" ]; then
            cmake --install build --prefix "C:/Program Files/common_system"
          else
            sudo cmake --install build --prefix /usr/local
          fi

      - name: Configure CMake
        run: |
          cmake -B build -S . \
            -GNinja \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DDATABASE_BUILD_BENCHMARKS=ON \
            -DUSE_UNIT_TEST=OFF \
            -DBUILD_DATABASE_SAMPLES=OFF \
            -DALLOW_BUILD_WITHOUT_NETWORK_SYSTEM=ON

      - name: Build benchmarks
        run: cmake --build build --config ${{ matrix.build_type }} --target database_benchmarks -j

      - name: Run benchmarks
        run: |
          cd build/bin
          # Note: google-benchmark >= 1.8 requires a unit suffix on
          # --benchmark_min_time; the bare "0.1" form is deprecated.
          ./database_benchmarks \
            --benchmark_format=json \
            --benchmark_out=benchmark_results_${{ matrix.os }}.json \
            --benchmark_repetitions=1 \
            --benchmark_min_time=0.1s

      - name: Run benchmarks (console output)
        run: |
          cd build/bin
          ./database_benchmarks \
            --benchmark_filter="QueryBuilder_Create|QueryBuilder_Select|Database_SingleSelect"

      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results-${{ matrix.os }}
          path: build/bin/benchmark_results_${{ matrix.os }}.json
          retention-days: 30

      # NOTE(review): this copies the baseline into the (ephemeral) workspace;
      # it is not committed or uploaded, so it will not persist across runs —
      # confirm a follow-up commit/artifact step exists if persistence is intended.
      - name: Save baseline (if requested)
        if: github.event.inputs.save_baseline == 'true' && github.ref == 'refs/heads/main'
        run: |
          mkdir -p benchmarks/baselines
          cp build/bin/benchmark_results_${{ matrix.os }}.json \
            benchmarks/baselines/baseline_${{ matrix.os }}_$(date +%Y%m%d).json

      - name: Compare with baseline (if exists)
        id: comparison
        # hashFiles() is already inside an expression context here, so a nested
        # ${{ }} is invalid; interpolate matrix.os with format() instead.
        if: hashFiles(format('benchmarks/baselines/baseline_{0}_*.json', matrix.os)) != ''
        run: |
          echo "Comparing with baseline..."
          # Find latest baseline
          BASELINE=$(ls -t benchmarks/baselines/baseline_${{ matrix.os }}_*.json 2>/dev/null | head -1)
          if [ -f "$BASELINE" ]; then
            echo "Baseline: $BASELINE"
            echo "Current: build/bin/benchmark_results_${{ matrix.os }}.json"
            # Run comparison using our custom script; a non-zero exit signals a
            # regression, which we surface via the step output instead of failing.
            python3 scripts/compare_benchmarks.py \
              "$BASELINE" \
              build/bin/benchmark_results_${{ matrix.os }}.json \
              --threshold 10 \
              --output comparison_${{ matrix.os }}.md \
              --format markdown || echo "regression_detected=true" >> $GITHUB_OUTPUT
            # Guard the cat: the script may have failed before writing the report.
            if [ -f comparison_${{ matrix.os }}.md ]; then
              cat comparison_${{ matrix.os }}.md
            fi
          else
            echo "No baseline found, skipping comparison"
          fi

      - name: Upload comparison report
        # Same rule as above: no nested ${{ }} inside hashFiles().
        if: hashFiles(format('comparison_{0}.md', matrix.os)) != ''
        uses: actions/upload-artifact@v4
        with:
          name: comparison-report-${{ matrix.os }}
          path: comparison_${{ matrix.os }}.md
          retention-days: 30

      - name: Check for performance regression
        if: steps.comparison.outputs.regression_detected == 'true'
        run: |
          echo "::warning::Performance regression detected in benchmarks"
          echo "Please review the comparison report for details"

  report:
    name: Generate Benchmark Report
    needs: benchmark
    runs-on: ubuntu-24.04
    if: always()
    steps:
      # download-artifact@v4 with no name downloads every artifact into a
      # directory named after the artifact (benchmark-results-*, comparison-report-*).
      - name: Download all artifacts
        uses: actions/download-artifact@v4

      - name: Generate summary
        run: |
          echo "# Benchmark Results Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "## Phase 0: Baseline Measurement" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          for dir in benchmark-results-*; do
            if [ -d "$dir" ]; then
              echo "### $dir" >> $GITHUB_STEP_SUMMARY
              if [ -f "$dir/benchmark_results_"*.json ]; then
                echo "✅ Benchmarks completed" >> $GITHUB_STEP_SUMMARY
                echo "" >> $GITHUB_STEP_SUMMARY
              else
                echo "❌ No results found" >> $GITHUB_STEP_SUMMARY
                echo "" >> $GITHUB_STEP_SUMMARY
              fi
            fi
          done
          # Include comparison reports if available
          for dir in comparison-report-*; do
            if [ -d "$dir" ]; then
              echo "" >> $GITHUB_STEP_SUMMARY
              echo "### Comparison Report: ${dir#comparison-report-}" >> $GITHUB_STEP_SUMMARY
              for report in "$dir"/*.md; do
                if [ -f "$report" ]; then
                  cat "$report" >> $GITHUB_STEP_SUMMARY
                fi
              done
            fi
          done
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "## Next Steps" >> $GITHUB_STEP_SUMMARY
          echo "- Review benchmark results" >> $GITHUB_STEP_SUMMARY
          echo "- Document baseline in docs/performance/BASELINE.md" >> $GITHUB_STEP_SUMMARY

      - name: Comment on PR with benchmark results
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            let body = '## Benchmark Results\n\n';
            // Collect any downloaded comparison-report-* artifact directories.
            const dirs = fs.readdirSync('.').filter(d => d.startsWith('comparison-report-'));
            if (dirs.length > 0) {
              for (const dir of dirs) {
                const files = fs.readdirSync(dir).filter(f => f.endsWith('.md'));
                for (const file of files) {
                  const content = fs.readFileSync(`${dir}/${file}`, 'utf8');
                  body += `### ${dir.replace('comparison-report-', '')}\n\n`;
                  body += content + '\n\n';
                }
              }
            } else {
              body += 'No comparison reports available. Baseline may not be established yet.\n';
            }
            // Update the existing bot comment in place instead of stacking new ones.
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });
            const botComment = comments.find(c =>
              c.user.type === 'Bot' && c.body.includes('## Benchmark Results')
            );
            if (botComment) {
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: botComment.id,
                body: body
              });
            } else {
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: body
              });
            }