bing grounding fix for connection id variable #105
Workflow file for this run

# .github/workflows/run-samples.yml
# -----------------------------------------------------------------
# Run the same end-to-end test suite against multiple "setups".
# A setup represents one already-provisioned environment
# (e.g., SAI_UAI or UAI) that supplies its own PROJECT_ENDPOINT
# secret. MODEL_DEPLOYMENT_NAME is shared across all setups.
# The workflow can be started three ways:
# 1. Automatically on any PR that modifies doc-samples/agents/**
# 2. Manually from the Actions tab / GitHub CLI (workflow_dispatch)
# 3. Via a slash-command comment in a PR ("/e2e <setup list>")
# -----------------------------------------------------------------
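# Example invocations (illustrative):
#   gh workflow run run-samples.yml -f setups="SAI_UAI,UAI"
#   gh workflow run run-samples.yml -f setups=all -f ref=my-feature-branch
#   PR comment: /e2e SAI_UAI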
name: "Run Samples"
# ────────────────────────────────────────────────────────────────
# 0️⃣ TRIGGERS
# ────────────────────────────────────────────────────────────────
on:
  # Automatic validation on pull requests
  pull_request_target:
    # # Only PRs whose base branch is main
    # branches:
    #   - main
    # # Only when sample code changes
    # paths:
    #   - doc-samples/agents/**

  # Run-button or gh CLI trigger
  workflow_dispatch:
    inputs:
      setups:
        description: "Setups to test (SAI_UAI,UAI) or 'all'"
        required: false
      ref:
        description: "Git ref to test (defaults to branch head)"
        required: false

  # Slash-command trigger inside pull-request comments
  issue_comment:
    types:
      - created
# ────────────────────────────────────────────────────────────────
# 1️⃣ HELPER JOB – figure out which setups to test
# ────────────────────────────────────────────────────────────────
jobs:
  resolve-setups:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.build.outputs.matrix }}
    steps:
      # Build the matrix JSON that the downstream job will consume
      - id: build
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            // Define all possible setups that this workflow supports.
            const ALL_SETUPS = ['SAI_UAI', 'UAI'];
            // Helper function to parse user input (comma-separated or 'all') into a list of setups.
            function parse(input) {
              // If input is empty, null, or 'all', return the full list.
              if (
                !input ||
                input.trim() === '' ||
                input.trim().toLowerCase() === 'all'
              ) {
                return ALL_SETUPS;
              }
              // Otherwise, split the comma-separated string, trim whitespace, and remove empty entries.
              return input
                .split(',')
                .map((s) => s.trim())
                .filter(Boolean);
            }
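            // Illustrative behavior (examples, not part of the original script):
            //   parse('SAI_UAI, UAI')     -> ['SAI_UAI', 'UAI']
            //   parse('all') or parse('') -> ALL_SETUPS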
            // Array to hold the setups requested for this specific workflow run.
            let requested = [];
            // Determine the requested setups based on how the workflow was triggered.
            switch (context.eventName) {
              case 'workflow_dispatch': {
                // Triggered manually via UI or API.
                // Read the 'setups' input from the event payload. core.getInput
                // only sees inputs passed to the github-script action itself,
                // not workflow_dispatch inputs.
                const inp = context.payload.inputs?.setups ?? '';
                // Parse the user input.
                requested = parse(inp);
                break;
              }
              case 'issue_comment': {
                // Triggered by a comment. issue_comment also fires on plain
                // issues, so only react to comments made on pull requests.
                if (!context.payload.issue.pull_request) break;
                const body = context.payload.comment.body;
                // Check if the comment starts with the slash command '/e2e'.
                const match = body.match(/^\/e2e\s+(.+)$/i);
                if (match) {
                  // If it matches, parse the arguments provided after '/e2e'.
                  requested = parse(match[1]);
                } else {
                  // If the comment doesn't match the command format, do nothing.
                  core.notice('Comment does not contain /e2e command');
                }
                break;
              }
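              // Illustrative matches for the slash command (the regex is
              // case-insensitive and single-line):
              //   "/e2e SAI_UAI,UAI" -> captures 'SAI_UAI,UAI'
              //   "/E2E all"         -> captures 'all'
              //   "/e2e" with no arguments, or a multi-line comment, does not match.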
              case 'pull_request_target':
                // Triggered automatically by a PR change matching the path filter.
                // Default to running all defined setups.
                requested = ALL_SETUPS;
                break;
            }
            // Validate that at least one setup was selected or determined.
            if (!requested.length) {
              // If no setups are found (e.g., invalid comment), fail the workflow early.
              core.setFailed('No setups selected – stopping workflow.');
              // Note: Script execution CONTINUES after setFailed, so we set the output below.
            }
            // Construct the matrix object in the format required by the downstream job's strategy.
            // If 'requested' is empty due to the failure check above, this will be { include: [] }.
            const matrix = {
              // The 'include' key pairs with the 'setup' variable name used in the e2e job.
              include: requested.map((s) => ({ setup: s }))
            };
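            // Example output shape consumed by the e2e job (illustrative):
            //   {"include":[{"setup":"SAI_UAI"},{"setup":"UAI"}]}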
            // Explicitly set the output, even if the job is marked to fail.
            // The downstream job's 'if' condition will prevent it from running if this job fails.
            core.setOutput('matrix', JSON.stringify(matrix));
            // No final 'return' statement is needed when using core.setOutput.
  # ────────────────────────────────────────────────────────────────
  # 2️⃣ MAIN TEST JOB – one copy per setup
  # ────────────────────────────────────────────────────────────────
  e2e:
    needs: resolve-setups
    if: ${{ needs.resolve-setups.result == 'success' }}
    strategy:
      # Use the matrix built by the helper job
      matrix: ${{ fromJSON(needs.resolve-setups.outputs.matrix) }}
      # Do not cancel other setups if one fails
      fail-fast: false
    # Human-readable name in the Actions UI
    name: "E2E – ${{ matrix.setup }}"
    # Virtual-machine image
    runs-on: ubuntu-latest
    permissions:
      # Needed to read repo contents
      contents: read
      # Needed if you acquire a cloud token via OIDC
      id-token: write
    # Map to per-setup environment (for PROJECT_ENDPOINT)
    environment: setup-${{ matrix.setup }}
    steps:
      # ── Check out the code at the desired ref
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.ref ||
                   github.event.pull_request.head.sha ||
                   github.ref }}
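      # Ref resolution per trigger: workflow_dispatch uses the optional 'ref'
      # input, pull_request_target uses the PR head SHA, and issue_comment
      # falls back to github.ref (the default branch).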
      # # ── Set up Node.js for JavaScript/TypeScript tests
      # - name: Set up Node.js
      #   if: false # Temporarily disabled
      #   uses: actions/setup-node@v4
      #   with:
      #     node-version: '20'
      #     cache: npm
      # # ── Install JavaScript dependencies
      # - name: Install npm packages
      #   if: false # Temporarily disabled
      #   run: npm ci
      # # ── Run Vitest JavaScript/TypeScript tests
      # - name: Run Vitest
      #   if: false # Temporarily disabled
      #   env:
      #     PROJECT_ENDPOINT: ${{ secrets.PROJECT_ENDPOINT }}
      #     MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
      #   run: npx vitest run --coverage
      # ── Set up Python
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: pip
      # ── Install azure-ai-projects and azure-ai-agents from local wheel files
      # Adjust the filenames to match your exact .whl files
      - name: Install local azure-ai-projects wheel
        run: |
          pip install libs/azure_ai_projects-1.0.0b10-py3-none-any.whl
          pip install libs/azure_ai_agents-1.0.0a20250430003-py3-none-any.whl
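          # Illustrative alternative, assuming exactly one wheel per package in libs/:
          #   pip install libs/azure_ai_projects-*.whl libs/azure_ai_agents-*.whl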
      # ── Install Python requirements
      # Installs packages listed in the specified requirements.txt file (excluding azure-ai-projects)
      - name: Install Python dependencies
        run: |
          pip install -r doc-samples/agents/python/requirements.txt
      # ── Run PyTest
      - name: Run PyTest
        env:
          PROJECT_ENDPOINT: ${{ secrets.PROJECT_ENDPOINT }}
          MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
        run: |
          pytest doc-samples/agents/python --maxfail=1 --disable-warnings
      # ── Set up .NET SDK
      - name: Set up .NET SDK
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: '8.0.x'
      # ── Restore .NET dependencies
      - name: dotnet restore
        run: dotnet restore
      # ── Run .NET tests
      - name: dotnet test
        env:
          PROJECT_ENDPOINT: ${{ secrets.PROJECT_ENDPOINT }}
          MODEL_DEPLOYMENT_NAME: ${{ secrets.MODEL_DEPLOYMENT_NAME }}
        run: dotnet test -c Release --verbosity normal