🚀 Add comprehensive benchmark integration system #14

Workflow file for this run

name: CI

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main ]
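
# Pipeline overview (derived from the jobs below): the test matrix gates build;
# benchmark and documentation both require test and build and run only on main;
# integration-test runs independently across Linux, Windows, and macOS.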
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
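        # Quote 3.10 and later: unquoted, YAML would parse 3.10 as the float 3.1.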
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential
      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          echo "Python dependencies installed successfully"
          # Verify key packages are importable before later steps depend on them
          python -c "import pytest; print('pytest version:', pytest.__version__)"
          python -c "import numpy; print('numpy version:', numpy.__version__)"
          python -c "import flake8; print('flake8 installed successfully')"
      - name: Lint with flake8
        run: |
          # Stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings; the GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
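      # run_tests.py and test_components.py are assumed to exit non-zero on
      # failure, which is what makes the next two steps meaningful as CI gates.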
      - name: Test with unittest
        run: |
          cd tests
          python run_tests.py
      - name: Test component system
        run: |
          cd tests
          python test_components.py
      - name: Run interface demo
        run: |
          python interface_demo.py
      - name: Run combined demo
        run: |
          python combined_demo.py
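      # The demo scripts double as smoke tests: any uncaught exception fails the job.
      # coverage (and pytest) are assumed to be provided by requirements.txt.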
      - name: Generate coverage report
        run: |
          coverage run -m pytest tests/
          coverage xml
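      # Upload from a single matrix leg only, to avoid duplicate Codecov reports.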
      - name: Upload coverage to Codecov
        if: matrix.python-version == '3.11'
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage.xml
          flags: unittests
          name: codecov-umbrella

  build:
    runs-on: ubuntu-latest
    needs: test
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          # Ensure the packaging tools are present even if requirements.txt omits them
          pip install build twine
      - name: Build package
        run: |
          python -m build
      - name: Check package
        run: |
          twine check dist/*
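
  # Cross-platform sanity check: the package must import cleanly and exercise
  # its public API on Linux, Windows, and macOS.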
  integration-test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.10', '3.11']
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Run core tests
        run: |
          cd tests
          python test_components.py
      - name: Test import
        run: |
          python -c "
          import zlayout
          print('ZLayout version:', zlayout.__version__)
          print('Available modules:', zlayout.__all__)
          # Test component creation
          from zlayout import ComponentManager, create_resistor
          manager = ComponentManager(':memory:')
          resistor = create_resistor('test_r', 1000)
          print('Successfully created resistor:', resistor.name)
          manager.close()
          print('Integration test passed!')
          "

  benchmark:
    runs-on: ubuntu-latest
    needs: [test, build]
    if: github.ref == 'refs/heads/main'  # Only run on main branch
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Fetch full history for benchmark comparison
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential cmake git curl
          # Install XMake
          curl -fsSL https://xmake.io/shget.text | bash
          echo "$HOME/.local/bin" >> $GITHUB_PATH
      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Install Google Benchmark
        run: |
          git clone https://github.com/google/benchmark.git
          cd benchmark
          cmake -E make_directory "build"
          cmake -E chdir "build" cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=on -DCMAKE_BUILD_TYPE=Release ../
          # Installing to the default /usr/local prefix needs root on the runner
          sudo cmake --build "build" --config Release --target install
          cd ..
          rm -rf benchmark
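      # Google Benchmark is assumed to be a build dependency of the C++
      # micro-benchmarks that scripts/run_benchmarks.py compiles and runs.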
      - name: Run benchmarks
        run: |
          python scripts/run_benchmarks.py --output-dir benchmark_results
      - name: Upload benchmark results
        uses: actions/upload-artifact@v3
        with:
          name: benchmark-results
          path: benchmark_results/
      - name: Publish benchmark summary
        run: |
          # Append the generated report to the job summary when results exist
          if [ -f "benchmark_results/benchmark_summary.json" ]; then
            echo "Benchmark results generated successfully"
            cat benchmark_results/benchmark_report.md >> $GITHUB_STEP_SUMMARY
          fi
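      # run_benchmarks.py is assumed to emit both benchmark_summary.json and
      # benchmark_report.md into the --output-dir given above.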
      - name: Update documentation
        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
        run: |
          # Configure git for automated commits
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          # Check whether the benchmark run refreshed the documentation
          if [ -f "docs/benchmarks/performance_results.md" ]; then
            git add docs/benchmarks/performance_results.md
            # Only commit if there are staged changes
            if ! git diff --staged --quiet; then
              git commit -m "📊 Update benchmark results [skip ci]"
              git push
            fi
          fi
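      # '[skip ci]' in the commit message keeps the automated push from
      # retriggering this workflow.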

  documentation:
    runs-on: ubuntu-latest
    needs: [test, build]
    if: github.ref == 'refs/heads/main'
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y doxygen graphviz
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Generate API documentation
        run: |
          doxygen Doxyfile
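      # Assumes a Doxyfile at the repository root with OUTPUT_DIRECTORY set so
      # the generated HTML lands in docs/html, matching publish_dir below.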
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        if: github.ref == 'refs/heads/main'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./docs/html