Skip to content

build(deps): bump lodash from 4.17.23 to 4.18.1 in /labextension/ui-t… #110

build(deps): bump lodash from 4.17.23 to 4.18.1 in /labextension/ui-t…

build(deps): bump lodash from 4.17.23 to 4.18.1 in /labextension/ui-t… #110

Workflow file for this run

name: E2E Test
# Installs Kale and runs the example notebook on KFP to verify the end-to-end
# workflow.
#
# NOTE: This test may fail if:
#   - upgrading the kfp version in Kale breaks e2e test behavior; update
#     KFP_VERSION in the env vars accordingly.
#   - iris.ipynb (the example notebook) changes; updates to it can break the
#     artifact checks.
#
# NOTE(review): `description:` is not a valid top-level key in the GitHub
# Actions workflow schema (unknown top-level keys are rejected by the workflow
# parser), so the original top-level description block is kept as comments.
# Run on pushes to main, on all pull requests, and on demand.
on:
  push:
    branches:
      - main
  pull_request:
  # for testing
  workflow_dispatch:

env:
  # Kubeflow Pipelines release installed into the KinD cluster.
  KFP_VERSION: "2.16.0"
  PYTHON_VERSION: "3.12"
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

      # Local Kubernetes cluster to host KFP.
      - name: Kubernetes KinD Cluster
        uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0
        with:
          version: v0.20.0
          cluster_name: kind-kale-test

      # Label the namespace up-front so every KFP pod is admitted under the
      # Pod Security Standards "restricted" profile.
      - name: Create kubeflow namespace with PSS restricted
        run: |
          kubectl create namespace kubeflow
          kubectl label namespace kubeflow \
            pod-security.kubernetes.io/enforce=restricted \
            pod-security.kubernetes.io/enforce-version=latest \
            --overwrite

      # Regression test: a pod explicitly running as root must be rejected,
      # proving the restricted policy is actually enforced.
      - name: Verify PSS restricted policy rejects root pods (regression test)
        run: |
          OUTPUT=$(kubectl run root-test --image=busybox --restart=Never -n kubeflow \
            --overrides='{"spec":{"containers":[{"name":"root-test","image":"busybox","command":["sleep","10"],"securityContext":{"runAsUser":0,"runAsNonRoot":false}}]}}' \
            2>&1 || true)
          if echo "$OUTPUT" | grep -qiE "Error|forbidden|violates|denied"; then
            echo "OK: Root pod correctly rejected by PSS restricted policy"
          else
            echo "ERROR: Root pod was NOT rejected by PSS restricted policy!"
            echo "Output: $OUTPUT"
            exit 1
          fi

      - name: Install KFP
        run: |
          export PIPELINE_VERSION=${{ env.KFP_VERSION }}
          kubectl apply -k "github.com/kubeflow/pipelines/manifests/kustomize/cluster-scoped-resources?ref=$PIPELINE_VERSION"
          kubectl wait --for condition=established --timeout=60s crd/applications.app.k8s.io
          kubectl apply -k "github.com/kubeflow/pipelines/manifests/kustomize/env/platform-agnostic?ref=$PIPELINE_VERSION"
          sleep 10

      - name: Wait for KFP to be Ready
        run: |
          set -e
          kubectl rollout status deployment/ml-pipeline-ui -n kubeflow --timeout=10m
          kubectl rollout status deployment/ml-pipeline -n kubeflow --timeout=10m
          kubectl rollout status deployment/seaweedfs -n kubeflow --timeout=10m

      - name: Debug kubeflow deployments
        if: failure()
        run: kubectl get all -n kubeflow

      # FIX: poll until the forwarded port accepts connections instead of a
      # fixed sleep (same readiness loop as the SeaweedFS port-forward below).
      - name: Port Forward KFP (background)
        run: |
          nohup kubectl port-forward svc/ml-pipeline-ui 8080:80 -n kubeflow > /dev/null 2>&1 &
          for i in $(seq 1 30); do nc -z localhost 8080 && break; sleep 1; done

      - name: Set up Python
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Set up Node.js
        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
        with:
          node-version: '22'

      - name: Install Kale
        run: |
          make dev

      # Expose the runner's primary non-loopback IPv4 address so workloads
      # inside KinD can reach host services (e.g. the served Kale wheel).
      - name: Set KFP host address for Linux
        run: |
          HOST_IP=$(ip -4 addr show | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | grep -v '127.0.0.1' | head -n1)
          echo "KFP_HOST_ADDR=${HOST_IP}" >> $GITHUB_ENV

      - name: Build and serve Kale wheel for KFP
        run: |
          make kfp-serve &
          sleep 5

      - name: Run example notebook
        run: |
          make kfp-run NB=examples/serving/sklearn/iris.ipynb KFP_HOST=http://localhost:8080

      # FIX: sort by creation time; list_runs ordering is otherwise not
      # guaranteed to return the run that was just submitted.
      - name: Check run result - just basic check for success
        run: |
          uv run python -c "
          import kfp, sys
          client = kfp.Client(host='http://localhost:8080')
          runs = client.list_runs(page_size=1, sort_by='created_at desc').runs
          if not runs: sys.exit(1)
          run = client.wait_for_run_completion(runs[0].run_id, timeout=600)
          print(f'Pipeline run status: {run.state}')
          if run.state == 'SUCCEEDED':
              print('Pipeline run succeeded.')
              sys.exit(0)
          else:
              print(f'Pipeline run failed: {run.state}')
              sys.exit(1)
          "

      - name: Check metrics artifact - with sdk, just that it exists
        run: |
          uv run python -c "
          import kfp, sys
          client = kfp.Client(host='http://localhost:8080')
          runs = client.list_runs(page_size=1, sort_by='created_at desc').runs
          if not runs:
              print('ERROR: no runs found')
              sys.exit(1)
          run = client.get_run(run_id=runs[0].run_id)
          if 'kale_metrics_artifact' not in str(run.pipeline_spec):
              print('ERROR: kale_metrics_artifact not found in pipeline spec')
              sys.exit(1)
          print('OK: kale_metrics_artifact declared in pipeline')
          "

      - name: Port Forward SeaweedFS S3 (background)
        run: |
          nohup kubectl port-forward svc/seaweedfs 9000:9000 -n kubeflow > /dev/null 2>&1 &
          for i in $(seq 1 30); do nc -z localhost 9000 && break; sleep 1; done

      # NOTE(review): list_objects_v2 returns at most 1000 keys per call; fine
      # for this single-run test, revisit if the bucket ever grows — confirm.
      - name: Check artifact generation - done with minio to check size
        run: |
          uv run --with boto3 python -c "
          import boto3, sys
          from botocore.client import Config
          s3 = boto3.client(
              's3',
              endpoint_url='http://localhost:9000',
              aws_access_key_id='minio',
              aws_secret_access_key='minio123',
              config=Config(signature_version='s3v4'),
          )
          # List everything stored to find the real artifact paths
          response = s3.list_objects_v2(Bucket='mlpipeline', Prefix='v2/artifacts/')
          objects = response.get('Contents', [])
          print(f'All artifacts in mlpipeline/v2/artifacts/ ({len(objects)} total):')
          for o in objects:
              print(f'  {o[\"Key\"]} ({o[\"Size\"]} bytes)')
          # check for output artifacts and their size
          output_artifacts = ['x_trn_output_artifact', 'x_tst_output_artifact',
                              'y_trn_output_artifact', 'y_tst_output_artifact',
                              'model_output_artifact']
          for art_name in output_artifacts:
              hits = [o for o in objects if o['Key'].endswith(f'/{art_name}')]
              if not hits:
                  print(f'ERROR: output artifact {art_name} not found in MinIO')
                  sys.exit(1)
              if hits[0]['Size'] == 0:
                  print(f'ERROR: output artifact {art_name} is empty')
                  sys.exit(1)
              print(f'OK: output artifact {art_name} ({hits[0][\"Size\"]} bytes)')
          "

      # Dump logs/describe for pods in phase=Failed to aid debugging.
      - name: Log from failed run
        if: failure()
        run: |
          echo "--- LOGS FROM FAILED STEPS ---"
          for pod in $(kubectl get pods -n kubeflow --field-selector=status.phase=Failed -o jsonpath='{.items[*].metadata.name}'); do
            echo "LOGS FOR POD: $pod "
            kubectl logs -n kubeflow $pod -c main || echo "Could not get logs for main container"
            echo "DESCRIBE POD: $pod "
            kubectl describe pod -n kubeflow $pod
          done