Skip to content

Commit 6eb0127

Browse files
committed
ci: Merge branch 'main' of github.com:fege/opendatahub-tests into python_required
2 parents f60abf3 + ac5a130 commit 6eb0127

9 files changed

Lines changed: 252 additions & 89 deletions

File tree

.github/workflows/add-remove-labels.yml

Lines changed: 6 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,6 @@ on:
1111

1212
issue_comment:
1313
types: [created, edited, deleted]
14-
# I don't believe the conditional is supported here
15-
# if: |
16-
# contains(github.event.comment.body, '/wip') ||
17-
# contains(github.event.comment.body, '/verified') ||
18-
# contains(github.event.comment.body, '/lgtm') ||
19-
# contains(github.event.comment.body, '/hold')
2014

2115

2216
permissions:
@@ -26,6 +20,12 @@ permissions:
2620

2721
jobs:
2822
add-remove-labels:
23+
if: |
24+
contains(github.event.comment.body, '/wip') ||
25+
contains(github.event.comment.body, '/verified') ||
26+
contains(github.event.comment.body, '/lgtm') ||
27+
contains(github.event.comment.body, '/hold') ||
28+
contains(github.event.comment.body, '/cherry-pick')
2929
runs-on: ubuntu-latest
3030

3131
steps:
@@ -36,27 +36,6 @@ jobs:
3636
comment-id: ${{ github.event.comment.id }}
3737
reactions: '+1'
3838

39-
# This currently fails with either the bot PAT or the standard github token secret
40-
# gh: Insufficient scopes for reacting to this Pull Request Review Comment. (HTTP 403)
41-
# {"message":"Insufficient scopes for reacting to this Pull Request Review Comment.","documentation_url":"https://docs.github.com/rest/reactions/reactions#create-reaction-for-a-pull-request-review-comment","status":"403"}
42-
# It could work if we had a token with the proper permissions.
43-
# See https://github.com/peter-evans/create-or-update-comment/issues/392 for why the action above doesn't work.
44-
# Confirmed as a bug, see: https://github.com/github/docs/issues/36899
45-
# - name: Acknowledge the review with thumbs up reaction
46-
# if: ${{ github.event.review }}
47-
# env:
48-
# GH_TOKEN: ${{ secrets.OPENDATAHUB_TESTS_BOT_PAT }}
49-
# REVIEW_COMMENT_ID: ${{ github.event.review.id }}
50-
# REPO_NAME: ${{ github.event.repository.name }}
51-
# REPO_OWNER: ${{ github.event.repository.owner.login }}
52-
# run: |
53-
# gh api \
54-
# --method POST \
55-
# -H "Accept: application/vnd.github+json" \
56-
# -H "X-GitHub-Api-Version: 2022-11-28" \
57-
# /repos/$REPO_OWNER/$REPO_NAME/pulls/comments/$REVIEW_COMMENT_ID/reactions \
58-
# -f "content=+1"
59-
6039
- uses: actions/checkout@v4
6140

6241
- name: Install uv
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
name: Cherry Pick On Comment
on:
  issue_comment:
    types: [created]
jobs:
  cherry-pick:
    name: Cherry Pick
    # Run only when:
    #   * the comment is on a pull request (issues and PRs share the
    #     issue_comment event; only PR comments carry `issue.pull_request`),
    #   * the comment contains the /cherry-pick command,
    #   * the commenter has an established association with the repository.
    # NOTE: the issue_comment payload has no top-level `pull_request` object,
    # so the original `github.event.pull_request.author_association` checks
    # always compared null and never filtered anyone out. The comment
    # author's association is the value actually present in this payload.
    if: |
      github.event.issue.pull_request != '' &&
      contains(github.event.comment.body, '/cherry-pick') &&
      ((github.event.comment.author_association != 'NONE') &&
      (github.event.comment.author_association != 'MANNEQUIN') &&
      (github.event.comment.author_association != 'FIRST_TIMER') &&
      (github.event.comment.author_association != 'FIRST_TIME_CONTRIBUTOR'))
    runs-on: ubuntu-latest
    steps:
      - name: Checkout the latest code
        uses: actions/checkout@v4
        with:
          # Bot PAT so pushed cherry-pick branches can trigger workflows.
          token: ${{ secrets.OPENDATAHUB_TESTS_BOT_PAT }}
          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
      - name: Automatic Cherry Pick
        uses: dbasunag/cherry-pick-pr@master
        env:
          GITHUB_TOKEN: ${{ secrets.OPENDATAHUB_TESTS_BOT_PAT }}

.github/workflows/scripts/constants.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@
3535
`lgtm` label removed on each new commit push.
3636
* To mark PR as verified comment `/verified` to the PR, to un-verify comment `/verified cancel` to the PR.
3737
`verified` label removed on each new commit push.
38+
* To cherry-pick a merged PR, comment `/cherry-pick <target_branch_name>` on the PR. If <target_branch_name> is valid
and the current PR is merged, a cherry-picked PR will be created and linked to the current PR.
3840
3941
<details>
4042
<summary>Supported labels</summary>
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
from typing import Generator, Any
2+
3+
import pytest
4+
from kubernetes.dynamic import DynamicClient
5+
from ocp_resources.namespace import Namespace
6+
from ocp_resources.persistent_volume_claim import PersistentVolumeClaim
7+
8+
9+
@pytest.fixture(scope="class")
def pvc_minio_namespace(
    admin_client: DynamicClient, minio_namespace: Namespace
) -> Generator[PersistentVolumeClaim, Any, Any]:
    """Class-scoped RWO filesystem PVC named `minio-pvc` in the MinIO namespace.

    Created on entry and deleted on teardown via the resource context manager.
    """
    pvc_kwargs = {
        "client": admin_client,
        "name": "minio-pvc",
        "namespace": minio_namespace.name,
        "accessmodes": PersistentVolumeClaim.AccessMode.RWO,
        "volume_mode": PersistentVolumeClaim.VolumeMode.FILE,
        "size": "10Gi",
    }
    with PersistentVolumeClaim(**pvc_kwargs) as pvc:
        yield pvc

tests/model_explainability/guardrails/conftest.py

Lines changed: 2 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,7 @@ def vllm_gateway_config(admin_client: DynamicClient, model_namespace: Namespace)
209209
def minio_llm_deployment(
210210
admin_client: DynamicClient,
211211
minio_namespace: Namespace,
212-
llm_models_pvc: PersistentVolumeClaim,
212+
pvc_minio_namespace: PersistentVolumeClaim,
213213
) -> Generator[Deployment, Any, Any]:
214214
with Deployment(
215215
client=admin_client,
@@ -229,7 +229,7 @@ def minio_llm_deployment(
229229
"volumes": [
230230
{
231231
"name": "model-volume",
232-
"persistentVolumeClaim": {"claimName": "llm-models-claim"},
232+
"persistentVolumeClaim": {"claimName": pvc_minio_namespace.name},
233233
}
234234
],
235235
"initContainers": [
@@ -282,18 +282,3 @@ def minio_llm_deployment(
282282
) as deployment:
283283
deployment.wait_for_replicas(timeout=Timeout.TIMEOUT_10MIN)
284284
yield deployment
285-
286-
287-
@pytest.fixture(scope="class")
288-
def llm_models_pvc(
289-
admin_client: DynamicClient, minio_namespace: Namespace
290-
) -> Generator[PersistentVolumeClaim, Any, Any]:
291-
with PersistentVolumeClaim(
292-
client=admin_client,
293-
name="llm-models-claim",
294-
namespace=minio_namespace.name,
295-
accessmodes=PersistentVolumeClaim.AccessMode.RWO,
296-
volume_mode=PersistentVolumeClaim.VolumeMode.FILE,
297-
size="10Gi",
298-
) as pvc:
299-
yield pvc

tests/model_explainability/lm_eval/conftest.py

Lines changed: 136 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import pytest
44
from ocp_resources.route import Route
5+
from ocp_resources.secret import Secret
56
from ocp_resources.service import Service
67
from pytest import FixtureRequest
78
from kubernetes.dynamic import DynamicClient
@@ -14,27 +15,14 @@
1415
from ocp_resources.resource import ResourceEditor
1516
from pytest_testconfig import py_config
1617

17-
from utilities.constants import Labels, Timeout, Annotations, Protocols
18+
from tests.model_explainability.lm_eval.utils import get_lmevaljob_pod
19+
from utilities.constants import Labels, Timeout, Annotations, Protocols, MinIo
1820

1921
VLLM_EMULATOR: str = "vllm-emulator"
2022
VLLM_EMULATOR_PORT: int = 8000
2123
LMEVALJOB_NAME: str = "lmeval-test-job"
2224

2325

24-
@pytest.fixture(scope="function")
25-
def lmevaljob_hf_pod(admin_client: DynamicClient, lmevaljob_hf: LMEvalJob) -> Generator[Pod, Any, Any]:
26-
lmeval_pod = Pod(
27-
client=admin_client,
28-
namespace=lmevaljob_hf.namespace,
29-
name=lmevaljob_hf.name,
30-
)
31-
32-
# TODO: Check if we can rely on LMEvalJob instead of pod
33-
lmeval_pod.wait(timeout=Timeout.TIMEOUT_2MIN)
34-
35-
yield lmeval_pod
36-
37-
3826
@pytest.fixture(scope="function")
3927
def lmevaljob_hf(
4028
admin_client: DynamicClient, model_namespace: Namespace, patched_trustyai_operator_configmap_allow_online: ConfigMap
@@ -107,22 +95,6 @@ def lmevaljob_local_offline(
10795
yield job
10896

10997

110-
@pytest.fixture(scope="function")
111-
def lmevaljob_vllm_emulator_pod(
112-
admin_client: DynamicClient, lmevaljob_vllm_emulator: LMEvalJob
113-
) -> Generator[Pod, Any, Any]:
114-
lmeval_pod = Pod(
115-
client=admin_client,
116-
namespace=lmevaljob_vllm_emulator.namespace,
117-
name=lmevaljob_vllm_emulator.name,
118-
)
119-
120-
# TODO: Check if we can rely on LMEvalJob instead of pod
121-
lmeval_pod.wait(timeout=Timeout.TIMEOUT_2MIN)
122-
123-
yield lmeval_pod
124-
125-
12698
@pytest.fixture(scope="function")
12799
def lmevaljob_vllm_emulator(
128100
admin_client: DynamicClient,
@@ -307,3 +279,136 @@ def vllm_emulator_route(
307279
service=vllm_emulator_service.name,
308280
) as route:
309281
yield route
282+
283+
284+
@pytest.fixture(scope="function")
def lmeval_minio_deployment(
    admin_client: DynamicClient, minio_namespace: Namespace, pvc_minio_namespace: PersistentVolumeClaim
) -> Generator[Deployment, Any, Any]:
    """Deploy a single-replica MinIO server backed by the shared PVC.

    Yields the Deployment after all replicas report ready; torn down on exit.
    """
    minio_app_label = {"app": MinIo.Metadata.NAME}
    # TODO: Unify with minio_llm_deployment fixture once datasets and models are in new model image
    with Deployment(
        client=admin_client,
        name=MinIo.Metadata.NAME,
        namespace=minio_namespace.name,
        replicas=1,
        selector={"matchLabels": minio_app_label},
        template={
            "metadata": {"labels": minio_app_label},
            "spec": {
                "volumes": [
                    {"name": "minio-storage", "persistentVolumeClaim": {"claimName": pvc_minio_namespace.name}}
                ],
                "containers": [
                    {
                        "name": MinIo.Metadata.NAME,
                        # Image pinned by digest for reproducible test runs.
                        "image": "quay.io/minio/minio"
                        "@sha256:46b3009bf7041eefbd90bd0d2b38c6ddc24d20a35d609551a1802c558c1c958f",
                        "args": ["server", "/data", "--console-address", ":9001"],
                        "env": [
                            {"name": "MINIO_ROOT_USER", "value": MinIo.Credentials.ACCESS_KEY_VALUE},
                            {"name": "MINIO_ROOT_PASSWORD", "value": MinIo.Credentials.SECRET_KEY_VALUE},
                        ],
                        # 9001 is the MinIO web-console port set via --console-address above.
                        "ports": [{"containerPort": MinIo.Metadata.DEFAULT_PORT}, {"containerPort": 9001}],
                        "volumeMounts": [{"name": "minio-storage", "mountPath": "/data"}],
                    }
                ],
            },
        },
        label=minio_app_label,
        wait_for_resource=True,
    ) as deployment:
        deployment.wait_for_replicas(timeout=Timeout.TIMEOUT_10MIN)
        yield deployment
323+
324+
325+
@pytest.fixture(scope="function")
def lmeval_minio_copy_pod(
    admin_client: DynamicClient, minio_namespace: Namespace, lmeval_minio_deployment: Deployment, minio_service: Service
) -> Generator[Pod, Any, Any]:
    """One-shot Pod that seeds the MinIO `models` bucket with LMEval assets.

    An init container copies the asset files out of the pinned assets image into
    an emptyDir volume; the main container then uploads them to MinIO with the
    `mc` client. Yields the Pod only after it reaches SUCCEEDED, so dependent
    fixtures can assume the bucket is populated.
    """
    with Pod(
        client=admin_client,
        name="copy-to-minio",
        namespace=minio_namespace.name,
        restart_policy="Never",  # run-to-completion, job-style pod
        volumes=[{"name": "shared-data", "emptyDir": {}}],
        init_containers=[
            {
                "name": "copy-data",
                # Assets image pinned by digest.
                # NOTE(review): assumes the image ships its data under /mnt/data
                # (source of the cp below) — confirm against the image contents.
                "image": "quay.io/trustyai_testing/lmeval-assets-flan-arceasy"
                "@sha256:11cc9c2f38ac9cc26c4fab1a01a8c02db81c8f4801b5d2b2b90f90f91b97ac98",
                "command": ["/bin/sh", "-c"],
                "args": ["cp -r /mnt/data /shared"],
                "volumeMounts": [{"name": "shared-data", "mountPath": "/shared"}],
            }
        ],
        containers=[
            {
                "name": "minio-uploader",
                "image": "quay.io/minio/mc@sha256:470f5546b596e16c7816b9c3fa7a78ce4076bb73c2c73f7faeec0c8043923123",
                "command": ["/bin/sh", "-c"],
                # Register the in-cluster MinIO endpoint, create the bucket if
                # missing, then upload everything staged by the init container.
                "args": [
                    f"mc alias set myminio http://{minio_service.name}:{MinIo.Metadata.DEFAULT_PORT} "
                    f"{MinIo.Credentials.ACCESS_KEY_VALUE} {MinIo.Credentials.SECRET_KEY_VALUE} &&\n"
                    "mc mb --ignore-existing myminio/models &&\n"
                    "mc cp --recursive /shared/data/ myminio/models"
                ],
                "volumeMounts": [{"name": "shared-data", "mountPath": "/shared"}],
            }
        ],
        wait_for_resource=True,
    ) as pod:
        pod.wait_for_status(status=Pod.Status.SUCCEEDED)
        yield pod
363+
364+
365+
@pytest.fixture(scope="function")
def lmevaljob_s3_offline(
    admin_client: DynamicClient,
    model_namespace: Namespace,
    lmeval_minio_deployment: Deployment,
    minio_service: Service,
    lmeval_minio_copy_pod: Pod,
    minio_data_connection: Secret,
) -> Generator[LMEvalJob, Any, Any]:
    """LMEvalJob that loads its model from MinIO through the S3 offline storage API.

    Depends on the MinIO deployment/service being up and on the copy pod having
    already seeded the bucket; S3 credentials, bucket, endpoint and region are
    all read from keys of the data-connection Secret.
    """
    with LMEvalJob(
        client=admin_client,
        name="evaljob-sample",
        namespace=model_namespace.name,
        model="hf",
        # Path where the S3-backed offline storage is mounted inside the job pod.
        model_args=[{"name": "pretrained", "value": "/opt/app-root/src/hf_home/flan"}],
        task_list={"taskNames": ["arc_easy"]},
        log_samples=True,
        allow_online=False,  # everything must come from the bucket, not the internet
        offline={
            "storage": {
                "s3": {
                    "accessKeyId": {"name": minio_data_connection.name, "key": "AWS_ACCESS_KEY_ID"},
                    "secretAccessKey": {"name": minio_data_connection.name, "key": "AWS_SECRET_ACCESS_KEY"},
                    "bucket": {"name": minio_data_connection.name, "key": "AWS_S3_BUCKET"},
                    "endpoint": {"name": minio_data_connection.name, "key": "AWS_S3_ENDPOINT"},
                    "region": {"name": minio_data_connection.name, "key": "AWS_DEFAULT_REGION"},
                    "path": "",
                    "verifySSL": False,  # in-cluster MinIO endpoint is plain HTTP
                }
            }
        },
    ) as job:
        yield job
398+
399+
400+
@pytest.fixture(scope="function")
def lmevaljob_hf_pod(admin_client: DynamicClient, lmevaljob_hf: LMEvalJob) -> Generator[Pod, Any, Any]:
    """Pod backing the Hugging Face LMEvalJob, resolved via the shared helper."""
    pod = get_lmevaljob_pod(lmevaljob=lmevaljob_hf, client=admin_client)
    yield pod
403+
404+
405+
@pytest.fixture(scope="function")
def lmevaljob_vllm_emulator_pod(
    admin_client: DynamicClient, lmevaljob_vllm_emulator: LMEvalJob
) -> Generator[Pod, Any, Any]:
    """Pod backing the vLLM-emulator LMEvalJob, resolved via the shared helper."""
    pod = get_lmevaljob_pod(lmevaljob=lmevaljob_vllm_emulator, client=admin_client)
    yield pod
410+
411+
412+
@pytest.fixture(scope="function")
def lmevaljob_s3_offline_pod(admin_client: DynamicClient, lmevaljob_s3_offline: LMEvalJob) -> Generator[Pod, Any, Any]:
    """Pod backing the S3-offline LMEvalJob, resolved via the shared helper."""
    pod = get_lmevaljob_pod(lmevaljob=lmevaljob_s3_offline, client=admin_client)
    yield pod

tests/model_explainability/lm_eval/test_lm_eval.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
from tests.model_explainability.lm_eval.utils import verify_lmevaljob_running
44
from utilities.constants import Timeout
55

6+
LMEVALJOB_COMPLETE_STATE: str = "Complete"
7+
68

79
@pytest.mark.parametrize(
810
"model_namespace",
@@ -90,3 +92,24 @@ def test_lmeval_vllm_emulator(admin_client, model_namespace, lmevaljob_vllm_emul
9092
lmevaljob_vllm_emulator_pod.wait_for_status(
9193
status=lmevaljob_vllm_emulator_pod.Status.SUCCEEDED, timeout=Timeout.TIMEOUT_10MIN
9294
)
95+
96+
97+
@pytest.mark.parametrize(
    "model_namespace, minio_data_connection",
    [
        pytest.param(
            {"name": "test-s3-lmeval"},
            {"bucket": "models"},
        )
    ],
    indirect=True,
)
def test_lmeval_s3_storage(
    admin_client,
    model_namespace,
    lmevaljob_s3_offline_pod,
):
    """Test to verify that LMEval works with a model stored in a S3 bucket"""
    # The job pod reaching SUCCEEDED implies the model was fetched from the
    # MinIO bucket (allow_online=False on the job) and the evaluation finished
    # without error.
    lmevaljob_s3_offline_pod.wait_for_status(
        status=lmevaljob_s3_offline_pod.Status.SUCCEEDED, timeout=Timeout.TIMEOUT_10MIN
    )

0 commit comments

Comments
 (0)