Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
303 changes: 205 additions & 98 deletions .github/workflows/run-samples.yml
Original file line number Diff line number Diff line change
@@ -1,108 +1,215 @@
# ---------------------------------------------------------------
# .github/workflows/project-e2e-tests.yml
#
# PURPOSE
# ▸ Run the same end-to-end test-suite against **many already-
# provisioned projects**. Each project has its own value for
# PROJECT_CLIENT, while MODEL_DEPLOYMENT_NAME is global.
#
# HOW TO USE
# 1. Under **Settings ▸ Environments** create one environment
# per project (project-alpha, project-beta, …).
# 2. In every environment add the secret PROJECT_CLIENT
# with that project’s value.
# 3. In **Settings ▸ Secrets → Actions** (repo-level) add a
# secret called MODEL_DEPLOYMENT_NAME that is identical
# for all projects.
# 4. List your projects in the matrix below.
# 5. Push a PR that touches tests/ → this workflow fans out
# and runs once per project, each run getting the correct
# secrets automatically.
# ---------------------------------------------------------------

name: Project E2E Tests

# ── 1️⃣ WHEN TO RUN ─────────────────────────────────────────────
# .github/workflows/run-samples.yml
# -----------------------------------------------------------------
# Run the same end-to-end test-suite against multiple "setups".
# A setup represents one already-provisioned environment
# (e.g., SAI_UAI or UAI) that supplies its own PROJECT_CLIENT
# secret. MODEL_DEPLOYMENT_NAME is shared across all setups.
# The workflow can be started three ways:
# 1. Automatically on any PR that modifies docs-samples/agents/**
# 2. Manually from the Actions tab / GitHub CLI (workflow_dispatch)
# 3. Via a slash-command comment in a PR ("/e2e <setup list>")
# -----------------------------------------------------------------

# ────────────────────────────────────────────────────────────────
# 0️⃣ TRIGGERS
# ────────────────────────────────────────────────────────────────
on:
# NOTE(review): `pull_request_target:` appears TWICE below. The first
# block (paths: tests/**) is the REMOVED side of a scraped diff; the
# second (paths: docs-samples/agents/**) is the ADDED side. Duplicate
# mapping keys are invalid YAML — keep only the second block.
pull_request_target: # Use target so the workflow
# file comes from the protected
branches: [ main ] # main branch (safer than PR),
paths: # and we still have access to
- "tests/**" # secrets for cloud creds.
# Automatic validation on pull requests
pull_request_target:
# Only PRs whose base branch is main
branches:
- main
# Only when sample code changes
paths:
- docs-samples/agents/**

# Run-button or gh CLI trigger
workflow_dispatch:
inputs:
setups:
description: "Setups to test (SAI_UAI,UAI) or 'all'"
required: false
ref:
description: "Git ref to test (defaults to branch head)"
required: false

# ── 2️⃣ JOB DEFINITION (matrix fan-out) ─────────────────────────
# Slash-command trigger inside pull-request comments
# NOTE(review): issue_comment also fires for comments on plain issues,
# not just PRs — the resolve-setups script must check
# context.payload.issue.pull_request before acting.
issue_comment:
types:
- created

# ────────────────────────────────────────────────────────────────
# 1️⃣ HELPER JOB – figure out which setups to test
# ────────────────────────────────────────────────────────────────
jobs:
e2e:
name: "E2E – ${{ matrix.project }}" # Shows up as “E2E – alpha”
resolve-setups:
  # Decide which setups the e2e job fans out over and publish them as
  # a matrix JSON string consumed via fromJSON() downstream.
  runs-on: ubuntu-latest
  permissions:
    # Least privilege: this job only inspects the triggering event.
    # (id-token: write removed — no cloud OIDC happens in this job;
    # the e2e job keeps it.)
    contents: read
  outputs:
    matrix: ${{ steps.build.outputs.matrix }}

  steps:
    # Build the matrix JSON that the downstream job will consume.
    - id: build
      uses: actions/github-script@v7
      with:
        result-encoding: string
        script: |
          // Every setup this workflow knows how to test.
          const ALL_SETUPS = ['SAI_UAI', 'UAI'];

          // Parse user input ("a,b", "all", empty/undefined) into a
          // list of setup names.
          function parse(input) {
            if (!input || input.trim() === '' ||
                input.trim().toLowerCase() === 'all') {
              return ALL_SETUPS;
            }
            return input.split(',').map((s) => s.trim()).filter(Boolean);
          }

          // Setups requested for this specific run.
          let requested = [];

          switch (context.eventName) {
            case 'workflow_dispatch': {
              // BUG FIX: core.getInput() only reads this *step's*
              // `with:` inputs (INPUT_* env vars) and always returned
              // '' here, silently running ALL setups. workflow_dispatch
              // inputs arrive in the event payload instead.
              requested = parse((context.payload.inputs || {}).setups);
              break;
            }
            case 'issue_comment': {
              // Guard: issue_comment also fires for plain issues; only
              // react to comments made on pull requests.
              if (context.payload.issue.pull_request) {
                const match =
                  context.payload.comment.body.match(/^\/e2e\s+(.+)$/i);
                if (match) {
                  requested = parse(match[1]);
                } else {
                  core.notice('Comment does not contain /e2e command');
                }
              }
              break;
            }
            case 'pull_request_target':
              // Automatic PR validation exercises every setup.
              requested = ALL_SETUPS;
              break;
          }

          // Reject typos such as "/e2e SAI" with a clear message
          // instead of creating a matrix entry with no environment.
          const unknown = requested.filter((s) => !ALL_SETUPS.includes(s));
          if (unknown.length) {
            core.setFailed(`Unknown setup(s): ${unknown.join(', ')}`);
            return JSON.stringify({ include: [] });
          }

          if (!requested.length) {
            // core.setFailed() marks the job failed but does NOT stop
            // the script, so still return a valid (empty) matrix for
            // fromJSON() downstream.
            core.setFailed('No setups selected – stopping workflow.');
            return JSON.stringify({ include: [] });
          }

          // { include: [ { setup: 'SAI_UAI' }, ... ] } — the shape the
          // e2e job's strategy.matrix expects.
          return JSON.stringify({ include: requested.map((s) => ({ setup: s })) });

# ────────────────────────────────────────────────────────────────
# 2️⃣ MAIN TEST JOB – one copy per setup
# ────────────────────────────────────────────────────────────────
e2e:
# Wait for the helper job that computed the setup matrix.
needs: resolve-setups
# Skip the fan-out entirely when resolve-setups failed.
if: ${{ needs.resolve-setups.result == 'success' }}

# NOTE(review): the strategy block below mixes the OLD (removed) and
# NEW (added) sides of a scraped diff — `matrix:` and `fail-fast:`
# each appear twice. Duplicate keys are invalid; keep only the
# fromJSON()-based matrix and a single fail-fast.
# Matrix drives one job per project.
strategy:
fail-fast: false # Don’t cancel others if one fails
matrix:
project: [ SAI_UAI, UAI] # <── ADD / REMOVE PROJECT_CLIENTS here
# Use the matrix built by the helper job
matrix: ${{ fromJSON(needs.resolve-setups.outputs.matrix) }}
# Do not cancel other setups if one fails
fail-fast: false

# Human-readable name in the Actions UI
name: "E2E – ${{ matrix.setup }}"
# Virtual-machine image
runs-on: ubuntu-latest

# NOTE(review): `environment:` also appears twice (old project-*
# naming vs new setup-*). Only setup-* matches the new matrix
# variable `matrix.setup`; the project-* line references a
# `matrix.project` that no longer exists and must be deleted.
# Map the matrix entry → matching Environment
# e.g. alpha → project-alpha
environment: project-${{ matrix.project }}
permissions:
# Needed to read repo contents
contents: read
# Needed if you acquire a cloud token via OIDC
id-token: write

# Map to per-setup environment (for PROJECT_CLIENT secret)
environment: setup-${{ matrix.setup }}

# ── 3️⃣ STEPS ───────────────────────────────────────────────
steps:
# NOTE(review): this workflow runs on pull_request_target, checks out
# the PR head SHA below, and later runs `npm ci` / test suites with
# the PROJECT_CLIENT / MODEL_DEPLOYMENT_NAME secrets in scope — i.e.
# untrusted PR code can execute with secrets (a "pwn request").
# Confirm the setup-* environments require reviewer approval before
# deploying this.
# ── Check out the code at the desired ref
- name: Checkout code
uses: actions/checkout@v4
with:
# Ref priority: explicit workflow_dispatch input → PR head commit →
# the ref the workflow ran on. YAML folds these three lines into a
# single plain scalar, which GitHub's expression parser accepts.
ref: ${{ github.event.inputs.ref ||
github.event.pull_request.head.sha ||
github.ref }}

# ── Set up Node.js for JavaScript tests
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
# assumes a package-lock.json at the repo root for the cache key —
# TODO confirm
cache: npm

# ── Install JavaScript dependencies
- name: Install npm packages
run: npm ci

# ── Run Vitest
# Secrets resolve from the per-setup environment selected above.
- name: Run Vitest
env:
PROJECT_CLIENT: ${{ secrets.PROJECT_CLIENT }}
MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
run: npx vitest run --coverage

# ── Set up .NET SDK
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: '8.0.x'

# ── Restore .NET dependencies
- name: dotnet restore
run: dotnet restore

# ── Run .NET tests
- name: dotnet test
env:
PROJECT_CLIENT: ${{ secrets.PROJECT_CLIENT }}
MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
run: dotnet test -c Release --verbosity normal

# ── Set up Python
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
cache: pip

# ── Install Python requirements
# Path is repo-relative; matches the docs-samples/agents/** trigger.
- name: Install Python dependencies
run: |
pip install -r docs-samples/agents/python/requirements.txt

# NOTE(review): everything from here down to "Run pytest" (sections
# 3.1–3.4: second checkout, python 3.9, root-level requirements.txt)
# is the REMOVED side of the scraped diff. Left in place it would
# re-run every suite a second time with the old settings — delete it;
# only the final "Run PyTest" step belongs to the new version.
# -- 3.1 Check out PR code ----------------------------------
- name: Checkout code
uses: actions/checkout@v4

# -- 3.2 JavaScript / TypeScript (Vitest) -------------------
- name: Set up Node.js (for Vitest)
uses: actions/setup-node@v4
with:
node-version: "20" # use LTS; bump as needed
cache: npm # built-in cache keyed by package-lock.json

- name: Install JS dependencies
run: npm ci

- name: Run Vitest integration suite
# Inject the secrets as environment variables that your app/tests read.
env:
PROJECT_CLIENT: ${{ secrets.PROJECT_CLIENT }}
MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
run: |
# Add any flags you like: coverage, reporters, etc.
npx vitest run

# -- 3.3 .NET (dotnet test) ---------------------------------
- name: Set up .NET SDK
uses: actions/setup-dotnet@v4
with:
dotnet-version: "8.0.x" # or 6.x / 7.x

- name: Restore & build .NET projects
run: dotnet restore

- name: Run .NET tests
env:
PROJECT_CLIENT: ${{ secrets.PROJECT_CLIENT }}
MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
run: |
dotnet test --configuration Release --verbosity normal

# -- 3.4 Python / Pytest -------------------------
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.9"
cache: pip

- name: Install Python deps
run: pip install -r requirements.txt

- name: Run pytest
env:
PROJECT_CLIENT: ${{ secrets.PROJECT_CLIENT }}
MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
run: pytest -q
# ── Run PyTest
# This step IS part of the new version: targets the sample dir,
# fails on the first error, suppresses warning noise.
- name: Run PyTest
env:
PROJECT_CLIENT: ${{ secrets.PROJECT_CLIENT }}
MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
run: |
pytest docs-samples/agents/python --maxfail=1 --disable-warnings
Loading