Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions .github/workflows/tutorials-get-started-notebooks-quickstart.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,15 @@ jobs:
az account get-access-token --scope https://management.azure.com/.default --output none
# ML
az account get-access-token --scope https://ml.azure.com/.default --output none
- name: delete stale online endpoints
  run: |
    source "${{ github.workspace }}/infra/bootstrapping/sdk_helpers.sh";
    source "${{ github.workspace }}/infra/bootstrapping/init_environment.sh";
    # Filter server-side with JMESPath (same pattern as the log-collection step)
    # instead of piping through grep: an empty match is an empty list with exit
    # status 0, whereas `grep` exits 1 on no match and can trip `set -e`.
    for ep in $(az ml online-endpoint list --query "[?starts_with(name, 'credit-endpoint-')].name" -o tsv); do
      echo "Deleting stale endpoint: $ep";
      az ml online-endpoint delete -n "$ep" -y --no-wait || true;
    done
  continue-on-error: true
- name: run get-started-notebooks/quickstart.ipynb
run: |
source "${{ github.workspace }}/infra/bootstrapping/sdk_helpers.sh";
Expand All @@ -88,6 +97,21 @@ jobs:
[ -f "../../.azureml/config" ] && cat "../../.azureml/config";
papermill -k python quickstart.ipynb quickstart.output.ipynb
working-directory: tutorials/get-started-notebooks
- name: collect endpoint logs on failure
  if: ${{ failure() }}
  run: |
    source "${{ github.workspace }}/infra/bootstrapping/sdk_helpers.sh";
    source "${{ github.workspace }}/infra/bootstrapping/init_environment.sh";
    # Pick the most recently created credit-endpoint-* endpoint
    # (sorted ascending by creation time, take the last element).
    endpoint_name=$(az ml online-endpoint list --query "[?starts_with(name, 'credit-endpoint-')]|sort_by(@,&properties.creation_context.created_at)[-1].name" -o tsv);
    if [ -z "$endpoint_name" ]; then
      echo "No credit-endpoint-* endpoints found for log collection.";
      exit 0;
    fi
    echo "Collecting logs for endpoint: $endpoint_name";
    az ml online-deployment list -e "$endpoint_name" --query "[].name" -o tsv;
    # NOTE: `az ml online-deployment` has no `get` or `logs` subcommands; the
    # CLI v2 commands are `show` and `get-logs`. The previous invocations always
    # failed and the failure was hidden by `|| true`, so no logs were collected.
    az ml online-deployment show -n blue -e "$endpoint_name" --query "{provisioning_state:provisioning_state,status:status,errors:errors}" -o json || true;
    az ml online-deployment get-logs -n blue -e "$endpoint_name" --lines 200 || true;
  continue-on-error: true
- name: upload notebook's working folder as an artifact
if: ${{ always() }}
uses: ./.github/actions/upload-artifact
Expand Down
2 changes: 1 addition & 1 deletion infra/scripts/remove_role_assignments.ps1
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ try
Write-Output "Pre-checking the RoleAssignment count..."
Get-RoleAssignmentCount
# Remove only limited RoleDefinitions
$staleRoleAssignments = Get-AzRoleAssignment -ResourceGroupName "$ResourceGroupName" | Where-Object {($_.ObjectType -eq $OBJTYPE) -and ($_.RoleDefinitionName -match "Storage Blob Data Reader|AzureML Metrics Writer \(preview\)|AcrPull")}
$staleRoleAssignments = Get-AzRoleAssignment -ResourceGroupName "$ResourceGroupName" | Where-Object {($_.ObjectType -eq $OBJTYPE) -and ($_.RoleDefinitionName -match "Storage Blob Data Reader|AzureML Metrics Writer \(preview\)|AcrPull|AzureML Compute Operator|Contributor")}
$unknownRoleAssignmentCount = $staleRoleAssignments.Count
Write-Output "Initiating the cleanup of unknownRole in the ResourceGroup:$ResourceGroupName having count as $unknownRoleAssignmentCount..."
$staleRoleAssignments | Remove-AzRoleAssignment
Expand Down
71 changes: 38 additions & 33 deletions tutorials/get-started-notebooks/pipeline.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -234,10 +234,12 @@
" - scipy=1.7.1\n",
" - pandas>=1.1,<1.2\n",
" - pip:\n",
" - inference-schema[numpy-support]==1.3.0\n",
" - inference-schema[numpy-support]==1.8.0\n",
" - xlrd==2.0.1\n",
" - mlflow== 2.4.1\n",
" - azureml-mlflow==1.51.0"
" - mlflow==2.16.2\n",
" - azureml-mlflow==1.51.0\n",
" - azureml-ai-monitoring\n",
" - azureml-inference-server-http==1.3.4"
]
},
{
Expand Down Expand Up @@ -269,20 +271,26 @@
"from azure.ai.ml.entities import Environment\n",
"\n",
"custom_env_name = \"aml-scikit-learn\"\n",
"\n",
"pipeline_job_env = Environment(\n",
" name=custom_env_name,\n",
" description=\"Custom environment for Credit Card Defaults pipeline\",\n",
" tags={\"scikit-learn\": \"0.24.2\"},\n",
" conda_file=os.path.join(dependencies_dir, \"conda.yaml\"),\n",
" image=\"mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04:latest\",\n",
" version=\"0.2.0\",\n",
")\n",
"pipeline_job_env = ml_client.environments.create_or_update(pipeline_job_env)\n",
"\n",
"print(\n",
" f\"Environment with name {pipeline_job_env.name} is registered to workspace, the environment version is {pipeline_job_env.version}\"\n",
")"
"env_version = \"0.2.0\"\n",
"\n",
"# Check if the environment already exists\n",
"try:\n",
" pipeline_job_env = ml_client.environments.get(name=custom_env_name, version=env_version)\n",
" print(f\"Environment with name {pipeline_job_env.name} already exists, using existing version {pipeline_job_env.version}\")\n",
"except Exception:\n",
" # If it doesn't exist, create it\n",
" pipeline_job_env = Environment(\n",
" name=custom_env_name,\n",
" description=\"Custom environment for Credit Card Defaults pipeline\",\n",
" tags={\"scikit-learn\": \"0.24.2\"},\n",
" conda_file=os.path.join(dependencies_dir, \"conda.yaml\"),\n",
" image=\"mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04:latest\",\n",
" version=env_version,\n",
" )\n",
" pipeline_job_env = ml_client.environments.create_or_update(pipeline_job_env)\n",
" print(\n",
" f\"Environment with name {pipeline_job_env.name} is registered to workspace, the environment version is {pipeline_job_env.version}\"\n",
" )"
]
},
{
Expand Down Expand Up @@ -543,6 +551,7 @@
"import os\n",
"import pandas as pd\n",
"import mlflow\n",
"import joblib\n",
"\n",
"\n",
"def select_first_file(path):\n",
Expand All @@ -560,7 +569,7 @@
"mlflow.start_run()\n",
"\n",
"# enable autologging\n",
"mlflow.sklearn.autolog()\n",
"mlflow.sklearn.autolog(log_models=False)\n",
"\n",
"os.makedirs(\"./outputs\", exist_ok=True)\n",
"\n",
Expand Down Expand Up @@ -607,19 +616,15 @@
"\n",
" print(classification_report(y_test, y_pred))\n",
"\n",
" # Registering the model to the workspace\n",
" print(\"Registering the model via MLFlow\")\n",
" mlflow.sklearn.log_model(\n",
" sk_model=clf,\n",
" registered_model_name=args.registered_model_name,\n",
" artifact_path=args.registered_model_name,\n",
" )\n",
" print(\"Saving the model to the component output folder\")\n",
" os.makedirs(args.model, exist_ok=True)\n",
"\n",
" # Saving the model to a file\n",
" mlflow.sklearn.save_model(\n",
" sk_model=clf,\n",
" path=os.path.join(args.model, \"trained_model\"),\n",
" )\n",
" model_path = os.path.join(args.model, \"model.pkl\")\n",
" joblib.dump(clf, model_path)\n",
"\n",
" mlflow.log_artifact(model_path, artifact_path=\"model_output\")\n",
"\n",
" print(f\"Model saved to: {model_path}\")\n",
"\n",
" # Stop Logging\n",
" mlflow.end_run()\n",
Expand Down Expand Up @@ -924,9 +929,9 @@
"name": "python310-sdkv2"
},
"kernelspec": {
"display_name": "Python 3.10 - SDK v2",
"display_name": ".venv",
"language": "python",
"name": "python310-sdkv2"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
Expand All @@ -938,7 +943,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.9.5"
},
"nteract": {
"version": "nteract-front-end@1.0.0"
Expand Down
Loading