# Workflow file for run: "🚒 test: quant" (#130)
name: "CI/CD Pipeline (Push: dev-*)"

# Trigger on pushes to dev-* branches; documentation-only changes are skipped.
on:
  push:
    branches:
      - "dev-*"
    paths-ignore:
      - "docs/**"

# Least-privilege token: jobs only read repository contents and run metadata.
permissions:
  contents: read
  actions: read

env:
  # Default interpreter for single-version jobs (lint). Quoted so a version
  # like "3.10" is never parsed as the float 3.1.
  PYTHON_VERSION: "3.12"

jobs:
  # Detect whether an open PR already exists for this branch. Downstream jobs
  # only run when there is no PR (presumably the PR pipeline covers that
  # case — confirm against the repository's other workflows).
  check-pr:
    runs-on: ubuntu-latest
    outputs:
      has-pr: ${{ steps.check.outputs.has-pr }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Check for open PR
        id: check
        env:
          GH_TOKEN: ${{ secrets.GH_TOKEN }}
          # Pass the branch name through the environment instead of
          # interpolating ${{ github.ref_name }} into the script text, so a
          # crafted branch name cannot inject shell commands.
          HEAD_BRANCH: ${{ github.ref_name }}
        run: |
          PR=$(gh pr list --state open --head "$HEAD_BRANCH" --json number -q '.[0].number')
          if [ -z "$PR" ]; then
            echo "has-pr=false" >> "$GITHUB_OUTPUT"
          else
            echo "has-pr=true" >> "$GITHUB_OUTPUT"
          fi

  # Verify the package installs cleanly on every supported Python version.
  # Skipped entirely when an open PR already exists for this branch.
  build:
    runs-on: ubuntu-latest
    needs: [check-pr]
    if: needs.check-pr.outputs.has-pr == 'false'
    strategy:
      matrix:
        # Versions quoted so "3.10" is a string, not the float 3.1.
        python-version: ["3.10", "3.11", "3.12", "3.13"]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.ref }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "latest"
          enable-cache: true
      - name: Set up Python
        run: uv python install ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          uv sync --extra all

  # Static analysis: ruff lint and format check on the default Python version.
  lint:
    runs-on: ubuntu-latest
    needs: [build]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.ref }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "latest"
          enable-cache: true
      - name: Set up Python
        run: uv python install ${{ env.PYTHON_VERSION }}
      - name: Install dependencies and lint
        run: |
          uvx ruff check .
          uvx ruff format --check .

  # Full test suite across the version matrix, against a live Triton
  # Inference Server container; coverage and JUnit results go to Codecov.
  test:
    runs-on: ubuntu-latest
    needs: [build]
    strategy:
      matrix:
        python-version: ["3.10", "3.11", "3.12", "3.13"]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.ref }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "latest"
          enable-cache: true
      - name: Set up Python
        run: uv python install ${{ matrix.python-version }}
      - name: Install system dependencies
        run: |
          sudo apt update
          sudo apt install python3-opencv -y
      - name: Start Triton Inference Server
        # The repo is copied inside the container and installed with the
        # [mlops] extra before the server starts; model loading is explicit.
        run: |
          docker run -d --name triton-inference-server \
            -p 8000:8000 -p 8001:8001 \
            -v ${{ github.workspace }}:/mnt/zerohertzLib \
            -v ${{ github.workspace }}/test/data/triton-inference-server:/models \
            nvcr.io/nvidia/tritonserver:25.08-py3 \
            bash -c "cp -r /mnt/zerohertzLib /tmp/zerohertzLib && echo hi && pip install /tmp/zerohertzLib[mlops] && tritonserver --model-repository /models --model-control-mode=explicit"
      - name: Wait for Triton Inference Server to be ready
        # Poll the readiness endpoint for up to 60s before running tests.
        run: |
          echo "Waiting for Triton Inference Server to be ready..."
          timeout 60 bash -c 'until curl -f http://localhost:8000/v2/health/ready; do sleep 2; done'
          echo "Triton Inference Server is ready!"
      - name: Run tests
        env:
          DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }}
          DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }}
          DISCORD_BOT_CHANNEL: ${{ secrets.DISCORD_BOT_CHANNEL }}
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
          GH_TOKEN: ${{ secrets.GH_TOKEN }}
        run: |
          uv sync --extra all
          uv pip install pytest pytest-xdist pytest-cov
          uv run pytest --durations=0 -vv -n auto --dist=loadfile --cov=zerohertzLib --cov-report=xml --junitxml=junit.xml -o junit_family=legacy
      # Upload even when tests fail, so failures are still reported.
      - name: Upload test results to Codecov
        if: always()
        uses: codecov/test-results-action@v1
        with:
          flags: python${{ matrix.python-version }}
          token: ${{ secrets.CODECOV_TOKEN }}
      - name: Upload results to Codecov
        if: always()
        uses: codecov/codecov-action@v4
        with:
          flags: python${{ matrix.python-version }}
          token: ${{ secrets.CODECOV_TOKEN }}