Drop unsupported params for litellm models (#20045) #158

Workflow file for this run

name: MLflow tests
on:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened
      - ready_for_review
  push:
    branches:
      - master
      - branch-[0-9]+.[0-9]+
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}
  cancel-in-progress: true
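# With the group keyed on workflow, event, and ref, `cancel-in-progress: true` cancels a
# still-running build for the same branch or PR as soon as a newer run starts, so only
# the latest push consumes runner time.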
# Use `bash` by default for all `run` steps in this workflow:
# https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#defaultsrun
defaults:
  run:
    shell: bash
env:
  MLFLOW_HOME: ${{ github.workspace }}
  # Note miniconda is pre-installed in the virtual environments for GitHub Actions:
  # https://github.com/actions/virtual-environments/blob/main/images/linux/scripts/installers/miniconda.sh
  MLFLOW_CONDA_HOME: /usr/share/miniconda
  SPARK_LOCAL_IP: localhost
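  # pip reads `PIP_*` environment variables as if the matching options were passed on the
  # command line: the extra index serves CPU-only PyTorch wheels, and the constraints file
  # pins versions for every pip install in this workflow.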
  PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu
  PIP_CONSTRAINT: ${{ github.workspace }}/requirements/constraints.txt
  PYTHONUTF8: "1"
  _MLFLOW_TESTING_TELEMETRY: "true"
  MLFLOW_SERVER_ENABLE_JOB_EXECUTION: "false"
jobs:
  # The python-skinny tests cover the subset of mlflow functionality that is meant to be
  # supported with a smaller dependency footprint, while also verifying that certain
  # dependencies are omitted.
  python-skinny:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
    steps:
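      # Third-party actions are pinned to a full commit SHA (the trailing comment records
      # the tag) so that a re-tagged release cannot silently change the code this workflow runs.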
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/setup-python
      - name: Install dependencies
        run: |
          source ./dev/install-common-deps.sh --skinny
      - name: Run tests
        run: |
          ./dev/run-python-skinny-tests.sh
  python:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: read
    strategy:
      fail-fast: false
      matrix:
        group: [1, 2, 3, 4]
        include:
          - splits: 4
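    # The matrix fans the suite out into `splits` equal groups and each job runs one
    # `group`; the `--splits`/`--group` pytest options below (provided by a pytest-split
    # style plugin or the repo's own conftest) select that job's share of the tests.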
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/free-disk-space
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install dependencies
        run: |
          uv sync --extra extras --extra gateway --extra mcp --extra genai
          uv pip install \
            -r requirements/test-requirements.txt \
            -r requirements/extra-ml-requirements.txt
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
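      # Later steps use `uv run --no-sync` so uv does not re-sync the project environment
      # and remove the extra packages installed with `uv pip install` above.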
      - name: Import check
        run: |
          # `-I` is used to avoid importing modules from user-specific site-packages
          # that might conflict with the built-in modules (e.g. `types`).
          uv run --no-sync python -I tests/check_mlflow_lazily_imports_ml_packages.py
      - name: Run tests
        run: |
          source dev/setup-ssh.sh
          uv run --no-sync pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} \
            --quiet --requires-ssh --ignore-flavors \
            --ignore=tests/examples \
            --ignore=tests/evaluate \
            --ignore=tests/genai \
            tests
      - name: Run databricks-connect related tests
        run: |
          # This needs to run separately because installing databricks-connect could break
          # other tests that use a normal SparkSession instead of a remote SparkSession.
          uv run --no-sync --with 'databricks-agents,databricks-connect' \
            pytest tests/utils/test_requirements_utils.py::test_infer_pip_requirements_on_databricks_agents
  database:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 90
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - name: Build
        run: |
          ./tests/db/compose.sh pull -q postgresql mysql mssql
          docker images --digests
          ./tests/db/compose.sh build --build-arg DEPENDENCIES="$(python dev/extract_deps.py)"
      - name: Run tests
        run: |
          set +e
          err=0
          trap 'err=1' ERR
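          # `set +e` plus the ERR trap lets the loop try every database backend and only
          # record a failure flag instead of aborting on the first failing service;
          # `test $err = 0` at the end fails the step if any backend failed.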
          RESULTS=""
          for service in $(./tests/db/compose.sh config --services | grep '^mlflow-' | sort)
          do
            # Set `--no-TTY` to show container logs on GitHub Actions:
            # https://github.com/actions/virtual-environments/issues/5022
            ./tests/db/compose.sh run --rm --no-TTY $service pytest \
              tests/store/tracking/test_sqlalchemy_store.py \
              tests/store/tracking/test_sqlalchemy_store_query_trace_metrics.py \
              tests/store/tracking/test_gateway_sql_store.py \
              tests/store/model_registry/test_sqlalchemy_store.py \
              tests/db
            RESULTS="$RESULTS\n$service: $(if [ $? -eq 0 ]; then echo "✅"; else echo "❌"; fi)"
          done
          echo -e "$RESULTS"
          test $err = 0
      - name: Run migration check
        run: |
          set +e
          err=0
          trap 'err=1' ERR
          ./tests/db/compose.sh down --volumes --remove-orphans
          for service in $(./tests/db/compose.sh config --services | grep '^migration-')
          do
            ./tests/db/compose.sh run --rm --no-TTY $service
          done
          test $err = 0
      - name: Clean up
        run: |
          ./tests/db/compose.sh down --volumes --remove-orphans --rmi all
  java:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-java
        with:
          java-version: 11
      - name: Install dependencies
        run: |
          source ./dev/install-common-deps.sh
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
      - name: Run tests
        run: |
          cd mlflow/java
          mvn clean package -q
  flavors:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install dependencies
        run: |
          uv sync --extra extras
          uv pip install \
            -r requirements/test-requirements.txt \
            -r requirements/extra-ml-requirements.txt
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
      - name: Run tests
        run: |
          uv run --no-sync pytest \
            tests/tracking/fluent/test_fluent_autolog.py \
            tests/autologging
  models:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: read
    strategy:
      fail-fast: false
      matrix:
        group: [1, 2, 3]
        include:
          - splits: 3
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/free-disk-space
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install dependencies
        run: |
          uv sync --extra mlserver
          uv pip install \
            -r requirements/test-requirements.txt \
            pyspark langchain langchain-community
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
      - name: Run tests
        run: |
          uv run --no-sync pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} tests/models
  evaluate:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 90
    permissions:
      contents: read
    strategy:
      fail-fast: false
      matrix:
        group: [1]
        include:
          - splits: 1
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install dependencies
        run: |
          uv sync --extra extras --extra genai
          uv pip install \
            -r requirements/test-requirements.txt \
            torch transformers pyspark langchain langchain-experimental 'shap<0.47.0' lightgbm xgboost
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
      - name: Run tests
        run: |
          uv run --no-sync pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} tests/evaluate --ignore=tests/evaluate/test_default_evaluator_delta.py
      - name: Run tests with delta
        run: |
          uv run --no-sync pytest tests/evaluate/test_default_evaluator_delta.py
  genai:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install dependencies
        run: |
          uv sync --extra genai
          uv pip install \
            -r requirements/test-requirements.txt \
            deepeval ragas arize-phoenix-evals
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
      - name: Run GenAI Tests (OSS)
        run: |
          uv run --no-sync pytest tests/genai
      - name: Run GenAI Tests (Databricks)
        run: |
          uv run --no-sync --with databricks-agents \
            pytest tests/genai --ignore tests/genai/test_genai_import_without_agent_sdk.py \
            --ignore tests/genai/optimize --ignore tests/genai/prompts
  pyfunc:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 120
    permissions:
      contents: read
    strategy:
      fail-fast: false
      matrix:
        group: [1, 2, 3, 4]
        include:
          - splits: 4
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/free-disk-space
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install dependencies
        run: |
          uv sync --extra extras --extra gateway
          uv pip install \
            -r requirements/test-requirements.txt \
            tensorflow 'pyspark[connect]'
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
      - name: Run tests
        run: |
          uv run --no-sync pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} --durations=30 \
            tests/pyfunc tests/types --ignore tests/pyfunc/test_spark_connect.py
          # test_spark_connect.py fails if it's run with other tests, so run it separately.
          uv run --no-sync pytest tests/pyfunc/test_spark_connect.py
  windows:
    if: github.event_name != 'pull_request' || github.event.pull_request.draft == false
    runs-on: windows-latest
    timeout-minutes: 120
    permissions:
      contents: read
    strategy:
      fail-fast: false
      matrix:
        group: [1, 2, 3]
        include:
          - splits: 3
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - uses: ./.github/actions/untracked
      - uses: ./.github/actions/setup-python
      - uses: ./.github/actions/setup-pyenv
      - uses: ./.github/actions/setup-java
      - name: Install python dependencies
        run: |
          uv sync --extra extras --extra genai --extra mcp
          uv pip install \
            -r requirements/test-requirements.txt \
            pyspark datasets tensorflow torch transformers tf-keras openai \
            tests/resources/mlflow-test-plugin
      - uses: ./.github/actions/show-versions
      - uses: ./.github/actions/pipdeptree
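      # Spark on Windows needs the Hadoop native binaries (winutils.exe, hadoop.dll); the
      # cdarlint/winutils repo provides prebuilt copies, and HADOOP_HOME below points at
      # the matching Hadoop version.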
      - name: Download Hadoop winutils for Spark
        run: |
          git clone https://github.com/cdarlint/winutils /tmp/winutils
      - name: Run python tests
        env:
          # Starting from SQLAlchemy version 2.0, `QueuePool` is the default connection pool
          # when creating an `Engine`. `QueuePool` prevents the removal of temporary database
          # files created during tests on Windows as it keeps the DB connection open until
          # it's explicitly disposed.
          MLFLOW_SQLALCHEMYSTORE_POOLCLASS: "NullPool"
          # Set Hadoop environment variables required for testing Spark integrations on Windows
          HADOOP_HOME: /tmp/winutils/hadoop-3.2.2
        run: |
          export PATH=$PATH:$HADOOP_HOME/bin
          uv run --no-sync pytest tests \
            --splits=${{ matrix.splits }} \
            --group=${{ matrix.group }} \
            --ignore-flavors \
            --ignore=tests/projects \
            --ignore=tests/examples \
            --ignore=tests/evaluate \
            --ignore=tests/optuna \
            --ignore=tests/pyspark/optuna \
            --ignore=tests/genai \
            --ignore=tests/sagemaker \
            --ignore=tests/gateway \
            --ignore=tests/server/auth \
            --ignore=tests/data/test_spark_dataset.py