 run-name: >-
   ${{ github.event_name == 'repository_dispatch'
   && format(
-      'PR #{0} - Label {1} - {2}',
+      'GPU Test - PR #{0} - {1} - {2}',
       github.event.client_payload.pull_number,
       github.event.client_payload.ci_label,
       github.event.client_payload.correlation_id
       - name: Upload dependencies artifact
         uses: actions/upload-artifact@v4
         with:
-          name: dependencies-${{ matrix.python-version }}-${{ matrix.setup-script }}
+          name: dependencies-tests-full-${{ matrix.python-version }}-${{ matrix.setup-script }}
           path: requirements-freeze.txt
           compression-level: 0

@@ -95,3 +95,177 @@ jobs:
         PYTEST_ADDOPTS: "--color=yes"
         OPENAI_BASE_URL: http://localhost:12306/
         OPENAI_API_KEY: dummy
+
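+  # Runs for ordinary pushes/PRs, and for repository_dispatch events that request GPU CI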
+  minimal-examples:
+    if: >
+      github.event_name != 'repository_dispatch' ||
+      github.event.action == 'ci-gpu' ||
+      github.event.action == 'ci-all'
+    name: Minimal Examples with Python ${{ matrix.python-version }} (${{ matrix.setup-script }})
+
+    runs-on: [self-hosted, 1ES.Pool=agl-runner-gpu]
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        include:
+          - python-version: '3.10'
+            setup-script: 'legacy'
+          - python-version: '3.12'
+            setup-script: 'stable'
+          - python-version: '3.13'
+            setup-script: 'latest'
+      fail-fast: false
+    steps:
+      - name: Check GPU status
+        run: nvidia-smi
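+      # Check out the dispatched PR head, the PR merge ref, or the pushed ref, in that order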
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event_name == 'repository_dispatch' && github.event.client_payload.pr_ref || (github.event.pull_request.number && format('refs/pull/{0}/merge', github.event.pull_request.number)) || github.ref }}
+      - uses: astral-sh/setup-uv@v7
+        with:
+          enable-cache: true
+          python-version: ${{ matrix.python-version }}
+      - name: Upgrade dependencies (latest)
+        run: uv lock --upgrade
+        if: matrix.setup-script == 'latest'
+      - name: Sync dependencies (latest)
+        run: uv sync --frozen --no-default-groups --extra apo --group dev --group agents --group torch-gpu-stable
+        if: matrix.setup-script == 'latest'
+      - name: Sync dependencies (stable & legacy)
+        run: uv sync --frozen --no-default-groups --extra apo --group dev --group agents --group torch-gpu-${{ matrix.setup-script }}
+        if: matrix.setup-script != 'latest'
+      - name: Freeze dependencies
+        run: |
+          set -ex
+          uv pip freeze | tee requirements-freeze.txt
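+          # UV_LOCKED/UV_NO_SYNC pin every later uv invocation to this exact resolution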
+          echo "UV_LOCKED=1" >> $GITHUB_ENV
+          echo "UV_NO_SYNC=1" >> $GITHUB_ENV
+      - name: Upload dependencies artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: dependencies-minimal-examples-${{ matrix.python-version }}-${{ matrix.setup-script }}
+          path: requirements-freeze.txt
+          compression-level: 0
+
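+      # litellm_run.sh is assumed to expose the proxy on localhost:12306, the OPENAI_BASE_URL used below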
+      - name: Launch LiteLLM Proxy
+        run: |
+          ./scripts/litellm_run.sh
+        env:
+          AZURE_API_BASE: ${{ secrets.AZURE_GROUP_SUBSCRIPTION_API_BASE }}
+          AZURE_API_KEY: ${{ secrets.AZURE_GROUP_SUBSCRIPTION_API_KEY }}
+
+      - name: Write Traces via Otel Tracer
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
+          python write_traces.py otel
+
+      - name: Write Traces via AgentOps Tracer
+        env:
+          OPENAI_BASE_URL: http://localhost:12306/
+          OPENAI_API_KEY: dummy
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
+          python write_traces.py agentops
+
+      - name: Write Traces via Otel Tracer with Client
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
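+          # Start the trace store in the background and give it time to come up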
+          agl store --port 45993 --log-level DEBUG &
+          sleep 5
+          python write_traces.py otel --use-client
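+          # Stop the store; pkill exits nonzero when nothing matched, so report either way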
+          pkill -f agl && echo "SIGTERM sent to agl" || echo "No agl process found"
+          while pgrep -f agl; do
+            echo "Waiting for agl to finish..."
+            sleep 5
+          done
+
+      - name: Write Traces via AgentOps Tracer with Client
+        env:
+          OPENAI_BASE_URL: http://localhost:12306/
+          OPENAI_API_KEY: dummy
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
+          agl store --port 45993 --log-level DEBUG &
+          sleep 5
+          python write_traces.py agentops --use-client
+          pkill -f agl && echo "SIGTERM sent to agl" || echo "No agl process found"
+          while pgrep -f agl; do
+            echo "Waiting for agl to finish..."
+            sleep 5
+          done
+
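+      # vllm_server.py is assumed to run a short, self-contained smoke test of a local vLLM server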
+      - name: vLLM Server
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
+          python vllm_server.py Qwen/Qwen2.5-0.5B-Instruct
+
+      - name: LLM Proxy (OpenAI backend)
+        env:
+          OPENAI_API_BASE: http://localhost:12306/
+          OPENAI_API_KEY: dummy
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
+
+          python llm_proxy.py openai gpt-4.1-mini &
+
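+          # Poll the proxy health endpoint (assumed to listen on port 43886) for up to ~60s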
+          LLM_PROXY_READY=0
+          for attempt in $(seq 1 30); do
+            if curl -sSf http://localhost:43886/health > /dev/null 2>&1; then
+              LLM_PROXY_READY=1
+              break
+            fi
+            sleep 2
+          done
+          if [[ "$LLM_PROXY_READY" != "1" ]]; then
+            echo "LLM proxy failed to become healthy" >&2
+            exit 1
+          fi
+
+          python llm_proxy.py test gpt-4.1-mini
+
+          pkill -f llm_proxy.py && echo "SIGTERM sent to llm_proxy.py" || echo "No llm_proxy.py process found"
+          while pgrep -f llm_proxy.py; do
+            echo "Waiting for llm_proxy.py to finish..."
+            sleep 5
+          done
+
+      - name: LLM Proxy (vLLM backend)
+        if: matrix.setup-script != 'legacy' # Skip if return_token_ids is not supported
+        run: |
+          set -euo pipefail
+          source .venv/bin/activate
+          cd examples/minimal
+          python llm_proxy.py vllm Qwen/Qwen2.5-0.5B-Instruct &
+
+          LLM_PROXY_READY=0
+          for attempt in $(seq 1 30); do
+            if curl -sSf http://localhost:43886/health > /dev/null 2>&1; then
+              LLM_PROXY_READY=1
+              break
+            fi
+            sleep 2
+          done
+          if [[ "$LLM_PROXY_READY" != "1" ]]; then
+            echo "LLM proxy failed to become healthy" >&2
+            exit 1
+          fi
+
+          python llm_proxy.py test Qwen/Qwen2.5-0.5B-Instruct
+
+          pkill -f llm_proxy.py && echo "SIGTERM sent to llm_proxy.py" || echo "No llm_proxy.py process found"
+          while pgrep -f llm_proxy.py; do
+            echo "Waiting for llm_proxy.py to finish..."
+            sleep 5
+          done
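
For reference, the `repository_dispatch` path above is driven by GitHub's repository dispatch API. A minimal sketch of the triggering call, assuming a hypothetical `OWNER/REPO` slug, a `$GITHUB_TOKEN` with repo scope, and illustrative payload values; the `client_payload` keys mirror the ones this workflow reads, and `event_type` surfaces as `github.event.action`:

```bash
# Trigger GPU CI for a PR via repository_dispatch (event_type: ci-gpu or ci-all).
curl -sS -X POST \
  -H "Authorization: Bearer $GITHUB_TOKEN" \
  -H "Accept: application/vnd.github+json" \
  https://api.github.com/repos/OWNER/REPO/dispatches \
  -d '{
    "event_type": "ci-gpu",
    "client_payload": {
      "pull_number": 123,
      "ci_label": "gpu",
      "correlation_id": "run-2024-0001",
      "pr_ref": "refs/pull/123/merge"
    }
  }'
```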