From 133746a40046cd72369437d3521252d8202d55a8 Mon Sep 17 00:00:00 2001 From: Guy Bertental Date: Sun, 9 Feb 2025 00:06:17 +0200 Subject: [PATCH 01/18] Disable local authentication between Airlock Processor and Azure Service Bus in Function Binding (#4277) * Support managed identity authentication in azure function binding between airlock and service bus queue * Disable service bus local authentication --- CHANGELOG.md | 2 + .../BlobCreatedTrigger/function.json | 4 +- .../DataDeletionTrigger/function.json | 4 +- .../ScanResultTrigger/function.json | 4 +- .../StatusChangedQueueTrigger/function.json | 4 +- airlock_processor/_version.py | 2 +- airlock_processor/host.json | 2 +- core/terraform/airlock/airlock_processor.tf | 41 ++++++++++++------- core/terraform/airlock/locals.tf | 1 + core/terraform/airlock/variables.tf | 3 ++ core/terraform/main.tf | 1 + core/terraform/servicebus.tf | 1 + core/version.txt | 2 +- 13 files changed, 49 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c41d6f0ae..a9f3dcaaad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ ENHANCEMENTS: * Update Guacamole dependencies ([[#4232](https://github.com/microsoft/AzureTRE/issues/4232)]) * Add option to force tunnel TRE's Firewall ([#4237](https://github.com/microsoft/AzureTRE/issues/4237)) * Add EventGrid diagnostics to identify airlock issues ([#4258](https://github.com/microsoft/AzureTRE/issues/4258)) +* Disable local authentication in ServiceBus ([#4259](https://github.com/microsoft/AzureTRE/issues/4259)) * Allow enablement of Secure Boot and vTPM for Guacamole VMs ([#4235](https://github.com/microsoft/AzureTRE/issues/4235)) * Surface the server-layout parameter of Guacamole [server-layout](https://guacamole.apache.org/doc/gug/configuring-guacamole.html#session-settings) ([#4234](https://github.com/microsoft/AzureTRE/issues/4234)) * Add encryption at host for VMs ([#4263](https://github.com/microsoft/AzureTRE/pull/4263)) @@ -41,6 +42,7 @@ ENHANCEMENTS: * Airlock function host storage to use the user-assigned managed identity ([#4276](https://github.com/microsoft/AzureTRE/issues/4276)) * Disable local authentication in EventGrid ([#4254](https://github.com/microsoft/AzureTRE/issues/4254)) + BUG FIXES: * Update KeyVault references in API to use the version so Terraform cascades the update ([#4112](https://github.com/microsoft/AzureTRE/pull/4112)) * Template images are showing CVEs ([#4153](https://github.com/microsoft/AzureTRE/issues/4153)) diff --git a/airlock_processor/BlobCreatedTrigger/function.json b/airlock_processor/BlobCreatedTrigger/function.json index 5a652a8eff..c34edbeeb7 100644 --- a/airlock_processor/BlobCreatedTrigger/function.json +++ b/airlock_processor/BlobCreatedTrigger/function.json @@ -8,7 +8,9 @@ "direction": "in", "topicName": "%BLOB_CREATED_TOPIC_NAME%", "subscriptionName": "%TOPIC_SUBSCRIPTION_NAME%", - "connection": "SB_CONNECTION_STRING" + "connection": "%SERVICEBUS_CONNECTION_NAME%", + "accessRights": "listen", + "autoComplete": true }, { "type": "eventGrid", diff --git a/airlock_processor/DataDeletionTrigger/function.json b/airlock_processor/DataDeletionTrigger/function.json index 2b2bb580da..0cb7f66eab 100644 --- a/airlock_processor/DataDeletionTrigger/function.json +++ b/airlock_processor/DataDeletionTrigger/function.json @@ -7,7 +7,9 @@ "type": "serviceBusTrigger", "direction": "in", "queueName": "%AIRLOCK_DATA_DELETION_QUEUE_NAME%", - "connection": "SB_CONNECTION_STRING" + "connection": "%SERVICEBUS_CONNECTION_NAME%", + "accessRights": "listen", 
+ "autoComplete": true } ] } diff --git a/airlock_processor/ScanResultTrigger/function.json b/airlock_processor/ScanResultTrigger/function.json index 32758cea1c..266bd059fe 100644 --- a/airlock_processor/ScanResultTrigger/function.json +++ b/airlock_processor/ScanResultTrigger/function.json @@ -7,7 +7,9 @@ "type": "serviceBusTrigger", "direction": "in", "queueName": "%AIRLOCK_SCAN_RESULT_QUEUE_NAME%", - "connection": "SB_CONNECTION_STRING" + "connection": "%SERVICEBUS_CONNECTION_NAME%", + "accessRights": "listen", + "autoComplete": true }, { "type": "eventGrid", diff --git a/airlock_processor/StatusChangedQueueTrigger/function.json b/airlock_processor/StatusChangedQueueTrigger/function.json index f686eca80a..b96de6710c 100644 --- a/airlock_processor/StatusChangedQueueTrigger/function.json +++ b/airlock_processor/StatusChangedQueueTrigger/function.json @@ -6,7 +6,9 @@ "type": "serviceBusTrigger", "direction": "in", "queueName": "%AIRLOCK_STATUS_CHANGED_QUEUE_NAME%", - "connection": "SB_CONNECTION_STRING" + "connection": "%SERVICEBUS_CONNECTION_NAME%", + "accessRights": "listen", + "autoComplete": true }, { "type": "eventGrid", diff --git a/airlock_processor/_version.py b/airlock_processor/_version.py index 8088f75131..deded3247f 100644 --- a/airlock_processor/_version.py +++ b/airlock_processor/_version.py @@ -1 +1 @@ -__version__ = "0.8.1" +__version__ = "0.8.2" diff --git a/airlock_processor/host.json b/airlock_processor/host.json index 95b6b4b7d6..f9667b1f23 100644 --- a/airlock_processor/host.json +++ b/airlock_processor/host.json @@ -8,7 +8,7 @@ } } }, - "extensionBundle": { +"extensionBundle": { "id": "Microsoft.Azure.Functions.ExtensionBundle", "version": "[4.0.0, 5.0.0)" } diff --git a/core/terraform/airlock/airlock_processor.tf b/core/terraform/airlock/airlock_processor.tf index ccb36b81bb..f6a0f98ed4 100644 --- a/core/terraform/airlock/airlock_processor.tf +++ b/core/terraform/airlock/airlock_processor.tf @@ -66,21 +66,32 @@ resource "azurerm_linux_function_app" "airlock_function_app" { } app_settings = { - "SB_CONNECTION_STRING" = var.airlock_servicebus.default_primary_connection_string - "BLOB_CREATED_TOPIC_NAME" = azurerm_servicebus_topic.blob_created.name - "TOPIC_SUBSCRIPTION_NAME" = azurerm_servicebus_subscription.airlock_processor.name - "WEBSITES_ENABLE_APP_SERVICE_STORAGE" = false - "AIRLOCK_STATUS_CHANGED_QUEUE_NAME" = local.status_changed_queue_name - "AIRLOCK_SCAN_RESULT_QUEUE_NAME" = local.scan_result_queue_name - "AIRLOCK_DATA_DELETION_QUEUE_NAME" = local.data_deletion_queue_name - "ENABLE_MALWARE_SCANNING" = var.enable_malware_scanning - "ARM_ENVIRONMENT" = var.arm_environment - "MANAGED_IDENTITY_CLIENT_ID" = azurerm_user_assigned_identity.airlock_id.client_id - "TRE_ID" = var.tre_id - "WEBSITE_CONTENTOVERVNET" = 1 - "STORAGE_ENDPOINT_SUFFIX" = module.terraform_azurerm_environment_configuration.storage_suffix - "AzureWebJobsStorage__clientId" = azurerm_user_assigned_identity.airlock_id.client_id - "AzureWebJobsStorage__credential" = "managedidentity" + "SERVICEBUS_CONNECTION_NAME" = local.servicebus_connection + "${local.servicebus_connection}__tenantId" = azurerm_user_assigned_identity.airlock_id.tenant_id + "${local.servicebus_connection}__clientId" = azurerm_user_assigned_identity.airlock_id.client_id + "${local.servicebus_connection}__credential" = "managedidentity" + "${local.servicebus_connection}__fullyQualifiedNamespace" = var.airlock_servicebus_fqdn + + "BLOB_CREATED_TOPIC_NAME" = azurerm_servicebus_topic.blob_created.name + "TOPIC_SUBSCRIPTION_NAME" = 
azurerm_servicebus_subscription.airlock_processor.name + "EVENT_GRID_STEP_RESULT_TOPIC_URI_SETTING" = azurerm_eventgrid_topic.step_result.endpoint + "EVENT_GRID_STEP_RESULT_TOPIC_KEY_SETTING" = azurerm_eventgrid_topic.step_result.primary_access_key + "EVENT_GRID_DATA_DELETION_TOPIC_URI_SETTING" = azurerm_eventgrid_topic.data_deletion.endpoint + "EVENT_GRID_DATA_DELETION_TOPIC_KEY_SETTING" = azurerm_eventgrid_topic.data_deletion.primary_access_key + "WEBSITES_ENABLE_APP_SERVICE_STORAGE" = false + "AIRLOCK_STATUS_CHANGED_QUEUE_NAME" = local.status_changed_queue_name + "AIRLOCK_SCAN_RESULT_QUEUE_NAME" = local.scan_result_queue_name + "AIRLOCK_DATA_DELETION_QUEUE_NAME" = local.data_deletion_queue_name + "ENABLE_MALWARE_SCANNING" = var.enable_malware_scanning + "ARM_ENVIRONMENT" = var.arm_environment + "MANAGED_IDENTITY_CLIENT_ID" = azurerm_user_assigned_identity.airlock_id.client_id + "TRE_ID" = var.tre_id + "WEBSITE_CONTENTOVERVNET" = 1 + "STORAGE_ENDPOINT_SUFFIX" = module.terraform_azurerm_environment_configuration.storage_suffix + + "TOPIC_SUBSCRIPTION_NAME" = azurerm_servicebus_subscription.airlock_processor.name + "AzureWebJobsStorage__clientId" = azurerm_user_assigned_identity.airlock_id.client_id + "AzureWebJobsStorage__credential" = "managedidentity" "EVENT_GRID_STEP_RESULT_CONNECTION" = local.step_result_eventgrid_connection "${local.step_result_eventgrid_connection}__topicEndpointUri" = azurerm_eventgrid_topic.step_result.endpoint diff --git a/core/terraform/airlock/locals.tf b/core/terraform/airlock/locals.tf index 8ed6805e0e..838ddf091a 100644 --- a/core/terraform/airlock/locals.tf +++ b/core/terraform/airlock/locals.tf @@ -61,6 +61,7 @@ locals { azurerm_storage_account.sa_export_approved.id ] + servicebus_connection = "SERVICEBUS_CONNECTION" step_result_eventgrid_connection = "EVENT_GRID_STEP_RESULT_CONNECTION" data_deletion_eventgrid_connection = "EVENT_GRID_DATA_DELETION_CONNECTION" } diff --git a/core/terraform/airlock/variables.tf b/core/terraform/airlock/variables.tf index 95e03b4ba4..bb0fad04df 100644 --- a/core/terraform/airlock/variables.tf +++ b/core/terraform/airlock/variables.tf @@ -62,6 +62,9 @@ variable "airlock_servicebus" { default_primary_connection_string = string }) } +variable "airlock_servicebus_fqdn" { + type = string +} variable "tre_core_tags" { type = map(string) } diff --git a/core/terraform/main.tf b/core/terraform/main.tf index 49693884c1..4d6d910257 100644 --- a/core/terraform/main.tf +++ b/core/terraform/main.tf @@ -132,6 +132,7 @@ module "airlock_resources" { airlock_app_service_plan_sku = var.core_app_service_plan_sku airlock_processor_subnet_id = module.network.airlock_processor_subnet_id airlock_servicebus = azurerm_servicebus_namespace.sb + airlock_servicebus_fqdn = azurerm_servicebus_namespace.sb.endpoint applicationinsights_connection_string = module.azure_monitor.app_insights_connection_string enable_malware_scanning = var.enable_airlock_malware_scanning arm_environment = var.arm_environment diff --git a/core/terraform/servicebus.tf b/core/terraform/servicebus.tf index f686a8e08e..7c03d661c0 100644 --- a/core/terraform/servicebus.tf +++ b/core/terraform/servicebus.tf @@ -5,6 +5,7 @@ resource "azurerm_servicebus_namespace" "sb" { sku = "Premium" premium_messaging_partitions = "1" capacity = "1" + local_auth_enabled = false tags = local.tre_core_tags # Block public access diff --git a/core/version.txt b/core/version.txt index 663d6b3572..836582489b 100644 --- a/core/version.txt +++ b/core/version.txt @@ -1 +1 @@ -__version__ = "0.11.22" 
+__version__ = "0.11.23" From 9327874584b1045b40a58a40512e37bc4c6e9751 Mon Sep 17 00:00:00 2001 From: Tamir Kamara <26870601+tamirkamara@users.noreply.github.com> Date: Sun, 9 Feb 2025 12:14:24 +0200 Subject: [PATCH 02/18] Release 0.20.0 (#4345) * update release doc formatting * update change log * fix typos --- CHANGELOG.md | 65 +++++++++++++++++++++++++++++++--- docs/tre-developers/release.md | 1 + 2 files changed, 62 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9f3dcaaad..6e919c37e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,19 @@ -## 0.20.0 (Unreleased) +## 0.21.0 (Unreleased) **BREAKING CHANGES & MIGRATIONS**: -* InnerEye and MLFlow bundles depreciated and removed from main. If you wish to update and deploy these worksapce services they can be retrieved from release 0.19.1. ([#4127](https://github.com/microsoft/AzureTRE/issues/4127)) -* This released removed support for Porter v0.*. If you're upgrading from a much earlier verion you can't go directly to this one. ([#4228](https://github.com/microsoft/AzureTRE/issues/4228)) + +ENHANCEMENTS: + +BUG FIXES: + +COMPONENTS: + +## 0.20.0 (Feburary 9, 2025) + +**BREAKING CHANGES & MIGRATIONS**: +* InnerEye and MLFlow bundles depreciated and removed from main. If you wish to update and deploy these workspace services they can be retrieved from release 0.19.1. ([#4127](https://github.com/microsoft/AzureTRE/issues/4127)) +* This release removed support for Porter v0.*. If you're upgrading from a much earlier version you can't go directly to this one. ([#4228](https://github.com/microsoft/AzureTRE/issues/4228)) FEATURES: * Add support for customer-managed keys encryption. Core support ([#4141](https://github.com/microsoft/AzureTRE/issues/4142), [#4144](https://github.com/microsoft/AzureTRE/issues/4144)), Base workspace ([#4161](https://github.com/microsoft/AzureTRE/pull/4161)), other templates ([#4145](https://github.com/microsoft/AzureTRE/issues/4145)) @@ -55,7 +65,7 @@ BUG FIXES: * Fix failing tests, .env missing and storage logs ([#4207](https://github.com/microsoft/AzureTRE/issues/4207)) * Unable to delete virtual machines, add skip_shutdown_and_force_delete = true ([#4135](https://github.com/microsoft/AzureTRE/issues/4135)) * Bump terraform version in windows VM template ([#4212](https://github.com/microsoft/AzureTRE/issues/4212)) -* Upgrade azurerm terraform provider from v3.112.0 to v3.117.0 to mitiagte storage account deployment issue ([#4004](https://github.com/microsoft/AzureTRE/issues/4004)) +* Upgrade azurerm terraform provider from v3.112.0 to v3.117.0 to mitigate storage account deployment issue ([#4004](https://github.com/microsoft/AzureTRE/issues/4004)) * Fix VM actions where Workspace shared storage doesn't allow shared key access ([#4222](https://github.com/microsoft/AzureTRE/issues/4222)) * Fix public exposure in Guacamole service ([[#4199](https://github.com/microsoft/AzureTRE/issues/4199)]) * Fix Azure ML network tags to use name rather than ID ([[#4151](https://github.com/microsoft/AzureTRE/issues/4151)]) @@ -65,6 +75,37 @@ BUG FIXES: COMPONENTS: +| name | version | +| ----- | ----- | +| devops | 0.5.5 | +| core | 0.11.23 | +| ui | 0.6.3 | +| tre-shared-service-databricks-private-auth | 0.1.11 | +| tre-shared-service-gitea | 1.1.4 | +| tre-shared-service-sonatype-nexus | 3.3.2 | +| tre-shared-service-firewall | 1.3.0 | +| tre-shared-service-admin-vm | 0.5.2 | +| tre-shared-service-certs | 0.7.3 | +| tre-shared-service-airlock-notifier | 1.0.8 | +| tre-shared-service-cyclecloud | 
0.7.2 | +| tre-workspace-airlock-import-review | 0.14.2 | +| tre-workspace-base | 1.9.2 | +| tre-workspace-unrestricted | 0.13.2 | +| tre-workspace-service-gitea | 1.2.2 | +| tre-workspace-service-mysql | 1.0.9 | +| tre-workspace-service-health | 0.2.11 | +| tre-workspace-service-openai | 1.0.6 | +| tre-service-azureml | 0.9.2 | +| tre-user-resource-aml-compute-instance | 0.5.11 | +| tre-service-databricks | 1.0.10 | +| tre-workspace-service-azuresql | 1.0.15 | +| tre-service-guacamole | 0.12.7 | +| tre-service-guacamole-export-reviewvm | 0.2.2 | +| tre-service-guacamole-linuxvm | 1.2.4 | +| tre-service-guacamole-import-reviewvm | 0.3.2 | +| tre-service-guacamole-windowsvm | 1.2.6 | +| tre-workspace-service-ohdsi | 0.3.2 | + ## 0.19.1 **BREAKING CHANGES & MIGRATIONS**: @@ -80,6 +121,7 @@ BUG FIXES: * Workspace creation blocked due to Azure API depreciation ([#4095](https://github.com/microsoft/AzureTRE/issues/4095)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.2 | @@ -138,6 +180,7 @@ BUG FIXES: * Update .NET version on Linux VMs ([#4067](https://github.com/microsoft/AzureTRE/issues/4067)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -201,6 +244,7 @@ BUG FIXES: * Add lifecycle rule to the Gitea Shared Service template for the MySQL resource to stop it recreating on `update` ([#4006](https://github.com/microsoft/AzureTRE/issues/4006)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -255,6 +299,7 @@ BUG FIXES: * Fix issue with firewall failing to deploy on a new TRE deploy ([#3775](https://github.com/microsoft/AzureTRE/issues/3775)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -304,6 +349,7 @@ BUG FIXES: * Airlock Import Review workspace uses dedicated DNS zone to prevent conflict with core ([#3767](https://github.com/microsoft/AzureTRE/pull/3767)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -346,6 +392,7 @@ BUG FIXES: * Fix workspace not loading fails if operation or history roles are not loaded ([#3755](https://github.com/microsoft/AzureTRE/issues/3755)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -384,6 +431,7 @@ BUG FIXES: * SecuredByRole failing if roles are null ([#3740](https://github.com/microsoft/AzureTRE/issues/3740 )) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -433,6 +481,7 @@ BUG FIXES: * Fix issue with cost tags not displaying correctly for some user roles ([#3721](https://github.com/microsoft/AzureTRE/issues/3721)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -469,6 +518,7 @@ BUG FIXES: * Fix firewall config related to Nexus so that `pypi.org` is added to the allow-list ([#3694](https://github.com/microsoft/AzureTRE/issues/3694)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -518,6 +568,7 @@ BUG FIXES: * Added missing region entries in `databricks-udr.json` ([[#3688](https://github.com/microsoft/AzureTRE/pull/3688)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -557,6 +608,7 @@ BUG FIXES: * Upgrade airlock and unrestricted workspaces to base workspace version 0.12.0 ([#3659](https://github.com/microsoft/AzureTRE/pull/3659)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -616,6 +668,7 @@ BUG FIXES: COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -657,6 +710,7 @@ BUG FIXES: * Nexus fails to install due to `az login` and firewall rules 
([#3453](https://github.com/microsoft/AzureTRE/issues/3453)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.5.1 | @@ -859,6 +913,7 @@ BUG FIXES: * Fix KeyVault purge error on MLFlow uninstall ([#3082](https://github.com/microsoft/AzureTRE/pull/3082)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.4.4 | @@ -935,6 +990,7 @@ BUG FIXES: * Handle 429 TooManyRequests and 503 ServiceUnavailable which might return from Azure Cost Management in TRE Cost API ([#2835](https://github.com/microsoft/AzureTRE/issues/2835)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.4.2 | @@ -982,6 +1038,7 @@ BUG FIXES: * Fix issues with AML workspace service deployment ([#2768](https://github.com/microsoft/AzureTRE/pull/2768)) COMPONENTS: + | name | version | | ----- | ----- | | devops | 0.4.2 | diff --git a/docs/tre-developers/release.md b/docs/tre-developers/release.md index 19aeb12866..40a431eea5 100644 --- a/docs/tre-developers/release.md +++ b/docs/tre-developers/release.md @@ -21,6 +21,7 @@ The process follows these steps: 5. Include a final line with a link to the full changelog similar to this: **Full Changelog**: https://github.com/microsoft/AzureTRE/compare/v0.9.1...v0.9.2 + 7. Update [AzureTRE-Deployment](https://github.com/microsoft/AzureTRE-Deployment). The procedure may vary depending on the level of changes introduced in the new version but should include the following steps: 1. Update the tag used in [devcontainer.json](https://github.com/microsoft/AzureTRE-Deployment/blob/main/.devcontainer/devcontainer.json). 2. Rebuild the container. From 0bcf5b3b20d030e321f86dd8139f0b30ca728097 Mon Sep 17 00:00:00 2001 From: Jonny Rylands Date: Sun, 9 Feb 2025 18:05:39 +0000 Subject: [PATCH 03/18] Allow workspace App Service Plan SKU to be updated (#4339) * Allow workspace App Service Plan SKU to be updated #4331 --- CHANGELOG.md | 1 + templates/workspaces/airlock-import-review/porter.yaml | 2 +- templates/workspaces/airlock-import-review/template_schema.json | 1 + templates/workspaces/base/porter.yaml | 2 +- templates/workspaces/base/template_schema.json | 1 + templates/workspaces/unrestricted/porter.yaml | 2 +- templates/workspaces/unrestricted/template_schema.json | 1 + 7 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e919c37e7..a3ea27a0ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ **BREAKING CHANGES & MIGRATIONS**: ENHANCEMENTS: +* Allow workspace App Service Plan SKU to be updated ([#4331](https://github.com/microsoft/AzureTRE/issues/4331)) BUG FIXES: diff --git a/templates/workspaces/airlock-import-review/porter.yaml b/templates/workspaces/airlock-import-review/porter.yaml index 56f90dbc70..a651294ea9 100644 --- a/templates/workspaces/airlock-import-review/porter.yaml +++ b/templates/workspaces/airlock-import-review/porter.yaml @@ -1,7 +1,7 @@ --- schemaVersion: 1.0.0 name: tre-workspace-airlock-import-review -version: 0.14.2 +version: 0.14.3 description: "A workspace to do Airlock Data Import Reviews for Azure TRE" dockerfile: Dockerfile.tmpl registry: azuretre diff --git a/templates/workspaces/airlock-import-review/template_schema.json b/templates/workspaces/airlock-import-review/template_schema.json index 180e360abc..09d7d6be87 100644 --- a/templates/workspaces/airlock-import-review/template_schema.json +++ b/templates/workspaces/airlock-import-review/template_schema.json @@ -15,6 +15,7 @@ "title": "App Service Plan SKU", "description": "The SKU that will be used when deploying an Azure App 
Service Plan.", "default": "P1v3", + "updateable": true, "enum": [ "P0v3", "P1v3", diff --git a/templates/workspaces/base/porter.yaml b/templates/workspaces/base/porter.yaml index a7e09fa692..ed48af1a9b 100644 --- a/templates/workspaces/base/porter.yaml +++ b/templates/workspaces/base/porter.yaml @@ -1,7 +1,7 @@ --- schemaVersion: 1.0.0 name: tre-workspace-base -version: 1.9.2 +version: 1.9.3 description: "A base Azure TRE workspace" dockerfile: Dockerfile.tmpl registry: azuretre diff --git a/templates/workspaces/base/template_schema.json b/templates/workspaces/base/template_schema.json index b456b5f044..24cec47f34 100644 --- a/templates/workspaces/base/template_schema.json +++ b/templates/workspaces/base/template_schema.json @@ -27,6 +27,7 @@ "title": "App Service Plan SKU", "description": "The SKU that will be used when deploying an Azure App Service Plan.", "default": "P1v3", + "updateable": true, "enum": [ "P0v3", "P1v3", diff --git a/templates/workspaces/unrestricted/porter.yaml b/templates/workspaces/unrestricted/porter.yaml index b8bd2becae..6c9cb1e43b 100644 --- a/templates/workspaces/unrestricted/porter.yaml +++ b/templates/workspaces/unrestricted/porter.yaml @@ -1,7 +1,7 @@ --- schemaVersion: 1.0.0 name: tre-workspace-unrestricted -version: 0.13.2 +version: 0.13.3 description: "A base Azure TRE workspace" dockerfile: Dockerfile.tmpl registry: azuretre diff --git a/templates/workspaces/unrestricted/template_schema.json b/templates/workspaces/unrestricted/template_schema.json index f9c8a807f1..6ebbb1c159 100644 --- a/templates/workspaces/unrestricted/template_schema.json +++ b/templates/workspaces/unrestricted/template_schema.json @@ -27,6 +27,7 @@ "title": "App Service Plan SKU", "description": "The SKU that will be used when deploying an Azure App Service Plan.", "default": "P1v3", + "updateable": true, "enum": [ "P0v3", "P1v3", From 998c3d4eb97ae9daed35e9c12d022029101b0862 Mon Sep 17 00:00:00 2001 From: Yuval Yaron <43217306+yuvalyaron@users.noreply.github.com> Date: Tue, 11 Feb 2025 00:46:57 +0200 Subject: [PATCH 04/18] Remove public IP from TRE's firewall when forced tunneling is configured (#4346) * remove public IP from TRE's firewall when forced tunneling is configured * update changelog * update version as this change is not backward compatible * update template version --- CHANGELOG.md | 1 + templates/shared_services/firewall/porter.yaml | 2 +- templates/shared_services/firewall/terraform/firewall.tf | 5 +++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a3ea27a0ac..9979b55d2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ ENHANCEMENTS: * Allow workspace App Service Plan SKU to be updated ([#4331](https://github.com/microsoft/AzureTRE/issues/4331)) +* Remove public IP from TRE's firewall when forced tunneling is configured ([#4346](https://github.com/microsoft/AzureTRE/pull/4346)) BUG FIXES: diff --git a/templates/shared_services/firewall/porter.yaml b/templates/shared_services/firewall/porter.yaml index ffba80504b..d5759218a7 100644 --- a/templates/shared_services/firewall/porter.yaml +++ b/templates/shared_services/firewall/porter.yaml @@ -1,7 +1,7 @@ --- schemaVersion: 1.0.0 name: tre-shared-service-firewall -version: 1.3.0 +version: 1.3.1 description: "An Azure TRE Firewall shared service" dockerfile: Dockerfile.tmpl registry: azuretre diff --git a/templates/shared_services/firewall/terraform/firewall.tf b/templates/shared_services/firewall/terraform/firewall.tf index 6697a359b6..3f42237a0f 100644 --- 
a/templates/shared_services/firewall/terraform/firewall.tf +++ b/templates/shared_services/firewall/terraform/firewall.tf @@ -1,4 +1,5 @@ resource "azurerm_public_ip" "fwtransit" { + count = var.firewall_force_tunnel_ip != "" ? 0 : 1 name = "pip-fw-${var.tre_id}" resource_group_name = local.core_resource_group_name location = data.azurerm_resource_group.rg.location @@ -11,7 +12,7 @@ resource "azurerm_public_ip" "fwtransit" { moved { from = azurerm_public_ip.fwpip - to = azurerm_public_ip.fwtransit + to = azurerm_public_ip.fwtransit[0] } resource "azurerm_public_ip" "fwmanagement" { @@ -38,7 +39,7 @@ resource "azurerm_firewall" "fw" { ip_configuration { name = "fw-ip-configuration" subnet_id = data.azurerm_subnet.firewall.id - public_ip_address_id = azurerm_public_ip.fwtransit.id + public_ip_address_id = var.firewall_force_tunnel_ip != "" ? null : azurerm_public_ip.fwtransit[0].id } dynamic "management_ip_configuration" { From a6d85e2600ffac2a3dcc9576607a637911e330ec Mon Sep 17 00:00:00 2001 From: Marcus Robinson Date: Mon, 10 Feb 2025 23:34:44 +0000 Subject: [PATCH 05/18] Fix upgrade if porter install has failed and add tests to resource processor (#4338) --- .devcontainer/devcontainer.json | 1 + CHANGELOG.md | 3 + airlock_processor/_version.py | 2 +- .../tests/shared_code}/__init__.py | 0 resource_processor/_version.py | 2 +- resource_processor/helpers/__init__.py | 0 .../{resources => helpers}/commands.py | 29 +- .../{resources => helpers}/httpserver.py | 0 .../{resources => helpers}/statuses.py | 2 +- .../{resources => helpers}/strings.py | 0 resource_processor/resources/helpers.py | 5 - resource_processor/tests_rp/test_commands.py | 94 ++++++ resource_processor/tests_rp/test_runner.py | 284 ++++++++++++++++++ resource_processor/vmss_porter/runner.py | 37 ++- 14 files changed, 427 insertions(+), 32 deletions(-) rename {resource_processor/resources => airlock_processor/tests/shared_code}/__init__.py (100%) create mode 100644 resource_processor/helpers/__init__.py rename resource_processor/{resources => helpers}/commands.py (89%) rename resource_processor/{resources => helpers}/httpserver.py (100%) rename resource_processor/{resources => helpers}/statuses.py (96%) rename resource_processor/{resources => helpers}/strings.py (100%) delete mode 100644 resource_processor/resources/helpers.py create mode 100644 resource_processor/tests_rp/test_commands.py create mode 100644 resource_processor/tests_rp/test_runner.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 90b080ddb1..e8966910e5 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -275,6 +275,7 @@ "extensions": [ "ms-python.python", "ms-python.pylance", + "ms-python.flake8", "hashicorp.terraform", "github.vscode-pull-request-github", "gitHub.copilot", diff --git a/CHANGELOG.md b/CHANGELOG.md index 9979b55d2c..9cf9862269 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,9 @@ ENHANCEMENTS: * Remove public IP from TRE's firewall when forced tunneling is configured ([#4346](https://github.com/microsoft/AzureTRE/pull/4346)) BUG FIXES: +* Fix upgrade when porter install has failed ([#4338](https://github.com/microsoft/AzureTRE/pull/4338)) + + COMPONENTS: diff --git a/airlock_processor/_version.py b/airlock_processor/_version.py index deded3247f..732155f8df 100644 --- a/airlock_processor/_version.py +++ b/airlock_processor/_version.py @@ -1 +1 @@ -__version__ = "0.8.2" +__version__ = "0.8.3" diff --git a/resource_processor/resources/__init__.py 
b/airlock_processor/tests/shared_code/__init__.py similarity index 100% rename from resource_processor/resources/__init__.py rename to airlock_processor/tests/shared_code/__init__.py diff --git a/resource_processor/_version.py b/resource_processor/_version.py index fee46bd8ce..def467e071 100644 --- a/resource_processor/_version.py +++ b/resource_processor/_version.py @@ -1 +1 @@ -__version__ = "0.11.1" +__version__ = "0.12.1" diff --git a/resource_processor/helpers/__init__.py b/resource_processor/helpers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/resource_processor/resources/commands.py b/resource_processor/helpers/commands.py similarity index 89% rename from resource_processor/resources/commands.py rename to resource_processor/helpers/commands.py index 0111b52358..59c0eaea82 100644 --- a/resource_processor/resources/commands.py +++ b/resource_processor/helpers/commands.py @@ -4,14 +4,13 @@ import logging from urllib.parse import urlparse -from resources.helpers import get_installation_id from shared.logging import logger, shell_output_logger def azure_login_command(config): set_cloud_command = f"az cloud set --name {config['azure_environment']} >/dev/null " - if config["vmss_msi_id"]: + if config.get("vmss_msi_id"): # Use the Managed Identity when in VMSS context login_command = f"az login --identity -u {config['vmss_msi_id']} >/dev/null " @@ -23,7 +22,7 @@ def azure_login_command(config): def apply_porter_credentials_sets_command(config): - if config["vmss_msi_id"]: + if config.get("vmss_msi_id"): # Use the Managed Identity when in VMSS context porter_credential_sets = "porter credentials apply vmss_porter/arm_auth_local_debugging.json >/dev/null 2>&1 && porter credentials apply vmss_porter/aad_auth.json >/dev/null 2>&1" @@ -80,25 +79,31 @@ async def build_porter_command(config, msg_body, custom_action=False): val_base64_bytes = base64.b64encode(val_bytes) parameter_value = val_base64_bytes.decode("ascii") - porter_parameters = porter_parameters + f" --param {parameter_name}=\"{parameter_value}\"" + porter_parameters += f" --param {parameter_name}=\"{parameter_value}\"" - installation_id = get_installation_id(msg_body) + installation_id = msg_body['id'] command_line = [f"porter" # If a custom action (i.e. 
not install, uninstall, upgrade) we need to use 'invoke' - f"{' invoke --action' if custom_action else ''}" - f" {msg_body['action']} \"{installation_id}\"" - f" --reference {config['registry_server']}/{msg_body['name']}:v{msg_body['version']}" - f" {porter_parameters} --force" - f" --credential-set arm_auth" - f" --credential-set aad_auth" + f"{' invoke --action' if custom_action else ''} " + f"{msg_body['action']} \"{installation_id}\" " + f"--reference {config['registry_server']}/{msg_body['name']}:v{msg_body['version']}" + f"{porter_parameters} " + f"--force " + f"--credential-set arm_auth " + f"--credential-set aad_auth " ] + if msg_body['action'] == 'upgrade': + command_line[0] = command_line[0] + "--force-upgrade " + + command_line[0] = command_line[0].strip() + return command_line async def build_porter_command_for_outputs(msg_body): - installation_id = get_installation_id(msg_body) + installation_id = msg_body['id'] command_line = [f"porter installations output list --installation {installation_id} --output json"] return command_line diff --git a/resource_processor/resources/httpserver.py b/resource_processor/helpers/httpserver.py similarity index 100% rename from resource_processor/resources/httpserver.py rename to resource_processor/helpers/httpserver.py diff --git a/resource_processor/resources/statuses.py b/resource_processor/helpers/statuses.py similarity index 96% rename from resource_processor/resources/statuses.py rename to resource_processor/helpers/statuses.py index 952dcef24b..2e1941f482 100644 --- a/resource_processor/resources/statuses.py +++ b/resource_processor/helpers/statuses.py @@ -1,5 +1,5 @@ from collections import defaultdict -from resources import strings +from helpers import strings # Specify pass and fail status strings so we can return the right statuses to the api depending on the action type (with a default of custom action) diff --git a/resource_processor/resources/strings.py b/resource_processor/helpers/strings.py similarity index 100% rename from resource_processor/resources/strings.py rename to resource_processor/helpers/strings.py diff --git a/resource_processor/resources/helpers.py b/resource_processor/resources/helpers.py deleted file mode 100644 index 98ef4d2e0d..0000000000 --- a/resource_processor/resources/helpers.py +++ /dev/null @@ -1,5 +0,0 @@ -def get_installation_id(msg_body): - """ - This is used to identify each bundle install within the porter state store. 
- """ - return msg_body['id'] diff --git a/resource_processor/tests_rp/test_commands.py b/resource_processor/tests_rp/test_commands.py new file mode 100644 index 0000000000..bb2f9f20e0 --- /dev/null +++ b/resource_processor/tests_rp/test_commands.py @@ -0,0 +1,94 @@ +import json +import pytest +from unittest.mock import patch, AsyncMock +from helpers.commands import azure_login_command, apply_porter_credentials_sets_command, azure_acr_login_command, build_porter_command, build_porter_command_for_outputs, get_porter_parameter_keys + + +@pytest.fixture +def mock_get_porter_parameter_keys(): + with patch("helpers.commands.get_porter_parameter_keys", new_callable=AsyncMock) as mock: + yield mock + + +@pytest.mark.parametrize("config, expected_command", [ + ({"azure_environment": "AzureCloud", "vmss_msi_id": "msi_id"}, "az cloud set --name AzureCloud >/dev/null && az login --identity -u msi_id >/dev/null "), + ({"azure_environment": "AzureCloud", "arm_client_id": "client_id", "arm_client_secret": "client_secret", "arm_tenant_id": "tenant_id"}, "az cloud set --name AzureCloud >/dev/null && az login --service-principal --username client_id --password client_secret --tenant tenant_id >/dev/null") +]) +def test_azure_login_command(config, expected_command): + """Test azure_login_command function.""" + assert azure_login_command(config) == expected_command + + +@pytest.mark.parametrize("config, expected_command", [ + ({"vmss_msi_id": "msi_id"}, "porter credentials apply vmss_porter/arm_auth_local_debugging.json >/dev/null 2>&1 && porter credentials apply vmss_porter/aad_auth.json >/dev/null 2>&1"), + ({}, "porter credentials apply vmss_porter/arm_auth_local_debugging.json >/dev/null 2>&1 && porter credentials apply vmss_porter/aad_auth_local_debugging.json >/dev/null 2>&1") +]) +def test_apply_porter_credentials_sets_command(config, expected_command): + """Test apply_porter_credentials_sets_command function.""" + assert apply_porter_credentials_sets_command(config) == expected_command + + +@pytest.mark.parametrize("config, expected_command", [ + ({"registry_server": "myregistry.azurecr.io"}, "az acr login --name myregistry >/dev/null ") +]) +def test_azure_acr_login_command(config, expected_command): + """Test azure_acr_login_command function.""" + assert azure_acr_login_command(config) == expected_command + + +@pytest.mark.asyncio +async def test_build_porter_command(mock_get_porter_parameter_keys): + """Test build_porter_command function.""" + config = {"registry_server": "myregistry.azurecr.io"} + msg_body = {"id": "guid", "action": "install", "name": "mybundle", "version": "1.0.0", "parameters": {"param1": "value1"}} + mock_get_porter_parameter_keys.return_value = ["param1"] + + expected_command = [ + "porter install \"guid\" --reference myregistry.azurecr.io/mybundle:v1.0.0 --param param1=\"value1\" --force --credential-set arm_auth --credential-set aad_auth" + ] + + command = await build_porter_command(config, msg_body) + assert command == expected_command + + +@pytest.mark.asyncio +async def test_build_porter_command_for_upgrade(mock_get_porter_parameter_keys): + """Test build_porter_command function for upgrade action.""" + config = {"registry_server": "myregistry.azurecr.io"} + msg_body = {"id": "guid", "action": "upgrade", "name": "mybundle", "version": "1.0.0", "parameters": {"param1": "value1"}} + mock_get_porter_parameter_keys.return_value = ["param1"] + + expected_command = [ + "porter upgrade \"guid\" --reference myregistry.azurecr.io/mybundle:v1.0.0 --param param1=\"value1\" --force 
--credential-set arm_auth --credential-set aad_auth --force-upgrade" + ] + + command = await build_porter_command(config, msg_body) + assert command == expected_command + + +@pytest.mark.asyncio +async def test_build_porter_command_for_outputs(): + """Test build_porter_command_for_outputs function.""" + msg_body = {"id": "guid", "action": "install", "name": "mybundle", "version": "1.0.0"} + expected_command = ["porter installations output list --installation guid --output json"] + + command = await build_porter_command_for_outputs(msg_body) + assert command == expected_command + + +@pytest.mark.asyncio +@patch("helpers.commands.azure_login_command", return_value="az login command") +@patch("helpers.commands.azure_acr_login_command", return_value="az acr login command") +@patch("asyncio.create_subprocess_shell") +async def test_get_porter_parameter_keys(mock_create_subprocess_shell, mock_azure_acr_login_command, mock_azure_login_command): + """Test get_porter_parameter_keys function.""" + config = {"registry_server": "myregistry.azurecr.io", "porter_env": {}} + msg_body = {"name": "mybundle", "version": "1.0.0"} + mock_proc = AsyncMock() + mock_proc.communicate.return_value = (json.dumps({"parameters": [{"name": "param1"}]}).encode(), b"") + mock_create_subprocess_shell.return_value = mock_proc + + expected_keys = ["param1"] + + keys = await get_porter_parameter_keys(config, msg_body) + assert keys == expected_keys diff --git a/resource_processor/tests_rp/test_runner.py b/resource_processor/tests_rp/test_runner.py new file mode 100644 index 0000000000..7c1dc6d2e4 --- /dev/null +++ b/resource_processor/tests_rp/test_runner.py @@ -0,0 +1,284 @@ +import json +from unittest.mock import patch, AsyncMock, Mock +import pytest +from resource_processor.vmss_porter.runner import ( + set_up_config, receive_message, invoke_porter_action, get_porter_outputs, check_runners, runner +) +from azure.servicebus.aio import ServiceBusClient +from azure.servicebus import ServiceBusSessionFilter + + +@pytest.fixture +def mock_service_bus_client(): + with patch("resource_processor.vmss_porter.runner.ServiceBusClient") as mock: + yield mock + + +@pytest.fixture +def mock_default_credential(): + with patch("resource_processor.vmss_porter.runner.default_credentials") as mock: + yield mock + + +@pytest.fixture +def mock_auto_lock_renewer(): + with patch("resource_processor.vmss_porter.runner.AutoLockRenewer") as mock: + yield mock + + +@pytest.fixture +def mock_logger(): + with patch("resource_processor.vmss_porter.runner.logger") as mock: + yield mock + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.get_config", return_value={"resource_request_queue": "test_queue", "service_bus_namespace": "test_namespace", "vmss_msi_id": "test_msi_id", "porter_env": {}}) +async def test_set_up_config(mock_get_config): + """Test setting up configuration.""" + config = set_up_config() + assert config == {"resource_request_queue": "test_queue", "service_bus_namespace": "test_namespace", "vmss_msi_id": "test_msi_id", "porter_env": {}} + + +async def setup_service_bus_client_and_credential(mock_service_bus_client, mock_default_credential, msi_id): + mock_credential = AsyncMock() + mock_default_credential.return_value.__aenter__.return_value = mock_credential + mock_service_bus_client_instance = mock_service_bus_client.return_value + return mock_service_bus_client_instance, mock_credential + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.receive_message") +async def 
test_runner(mock_receive_message, mock_service_bus_client, mock_default_credential): + """Test runner with valid MSI ID.""" + mock_service_bus_client_instance, mock_credential = await setup_service_bus_client_and_credential(mock_service_bus_client, mock_default_credential, 'test_msi_id') + + config = {"vmss_msi_id": "test_msi_id", "service_bus_namespace": "test_namespace"} + + await runner(0, config) + + mock_default_credential.assert_called_once_with('test_msi_id') + mock_service_bus_client.assert_called_once_with("test_namespace", mock_credential) + mock_receive_message.assert_called_once_with(mock_service_bus_client_instance, config) + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.receive_message") +async def test_runner_no_msi_id(mock_receive_message, mock_service_bus_client, mock_default_credential): + """Test runner with no MSI ID.""" + mock_service_bus_client_instance, mock_credential = await setup_service_bus_client_and_credential(mock_service_bus_client, mock_default_credential, None) + + config = {"vmss_msi_id": None, "service_bus_namespace": "test_namespace"} + + await runner(0, config) + + mock_default_credential.assert_called_once_with(None) + mock_service_bus_client.assert_called_once_with("test_namespace", mock_credential) + mock_receive_message.assert_called_once_with(mock_service_bus_client_instance, config) + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.receive_message") +async def test_runner_exception(mock_receive_message, mock_service_bus_client, mock_default_credential): + """Test runner with an exception.""" + mock_service_bus_client_instance, mock_credential = await setup_service_bus_client_and_credential(mock_service_bus_client, mock_default_credential, 'test_msi_id') + mock_receive_message.side_effect = Exception("Test Exception") + + config = {"vmss_msi_id": "test_msi_id", "service_bus_namespace": "test_namespace"} + + with pytest.raises(Exception, match="Test Exception"): + await runner(0, config) + + mock_default_credential.assert_called_once_with('test_msi_id') + mock_service_bus_client.assert_called_once_with("test_namespace", mock_credential) + mock_receive_message.assert_called_once_with(mock_service_bus_client_instance, config) + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.invoke_porter_action", return_value=True) +async def test_receive_message(mock_invoke_porter_action, mock_service_bus_client, mock_auto_lock_renewer): + mock_service_bus_client_instance = mock_service_bus_client.return_value + mock_auto_lock_renewer.return_value = AsyncMock() + + mock_receiver = AsyncMock() + mock_receiver.__aenter__.return_value = mock_receiver + mock_receiver.__aexit__.return_value = None + mock_receiver.session.session_id = "test_session_id" + mock_receiver.__aiter__.return_value = [AsyncMock()] + mock_receiver.__aiter__.return_value[0] = json.dumps({"id": "test_id", "action": "install", "stepId": "test_step_id", "operationId": "test_operation_id"}) + + mock_service_bus_client_instance.get_queue_receiver.return_value.__aenter__.return_value = mock_receiver + + run_once = Mock(side_effect=[True, False]) + + config = {"resource_request_queue": "test_queue"} + + await receive_message(mock_service_bus_client_instance, config, keep_running=run_once) + mock_receiver.complete_message.assert_called_once() + mock_service_bus_client_instance.get_queue_receiver.assert_called_once_with(queue_name="test_queue", max_wait_time=1, session_id=ServiceBusSessionFilter.NEXT_AVAILABLE) + + +@pytest.mark.asyncio 
+async def test_receive_message_unknown_exception(mock_auto_lock_renewer, mock_service_bus_client, mock_logger): + """Test receiving a message with an unknown exception.""" + mock_service_bus_client_instance = mock_service_bus_client.return_value + mock_auto_lock_renewer.return_value = AsyncMock() + + mock_receiver = AsyncMock() + mock_receiver.__aenter__.return_value = mock_receiver + mock_receiver.__aexit__.return_value = None + mock_receiver.session.session_id = "test_session_id" + mock_receiver.__aiter__.return_value = [AsyncMock()] + mock_receiver.__aiter__.return_value[0] = json.dumps({"id": "test_id", "action": "install", "stepId": "test_step_id", "operationId": "test_operation_id"}) + + mock_service_bus_client_instance.get_queue_receiver.return_value.__aenter__.return_value = mock_receiver + + run_once = Mock(side_effect=[True, False]) + + config = {"resource_request_queue": "test_queue"} + + with patch("resource_processor.vmss_porter.runner.receive_message", side_effect=Exception("Test Exception")): + await receive_message(mock_service_bus_client_instance, config, keep_running=run_once) + mock_logger.exception.assert_any_call("Unknown exception. Will retry...") + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command", return_value=["porter install"]) +@patch("resource_processor.vmss_porter.runner.run_porter", return_value=(0, "stdout", "stderr")) +@patch("resource_processor.vmss_porter.runner.service_bus_message_generator", return_value="test_message") +async def test_invoke_porter_action(mock_service_bus_message_generator, mock_run_porter, mock_build_porter_command, mock_service_bus_client): + """Test invoking a porter action.""" + mock_sb_sender = AsyncMock() + mock_service_bus_client.get_queue_sender.return_value = mock_sb_sender + + config = {"deployment_status_queue": "test_queue"} + msg_body = {"id": "test_id", "action": "install", "stepId": "test_step_id", "operationId": "test_operation_id"} + + result = await invoke_porter_action(msg_body, mock_service_bus_client, config) + + assert result is True + mock_sb_sender.send_messages.assert_called() + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command", return_value=["porter install"]) +@patch("resource_processor.vmss_porter.runner.run_porter", return_value=(1, "", "error")) +@patch("resource_processor.vmss_porter.runner.service_bus_message_generator", return_value="test_message") +async def test_invoke_porter_action_failure(mock_service_bus_message_generator, mock_run_porter, mock_build_porter_command, mock_service_bus_client): + """Test invoking a porter action with failure.""" + mock_sb_client = AsyncMock(spec=ServiceBusClient) + mock_sb_sender = AsyncMock() + mock_sb_client.get_queue_sender.return_value = mock_sb_sender + + config = {"deployment_status_queue": "test_queue"} + msg_body = {"id": "test_id", "action": "install", "stepId": "test_step_id", "operationId": "test_operation_id"} + + result = await invoke_porter_action(msg_body, mock_sb_client, config) + + assert result is False + mock_sb_sender.send_messages.assert_called() + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command", return_value=["porter install"]) +@patch("resource_processor.vmss_porter.runner.run_porter", side_effect=[(1, "", "could not find installation"), (0, "", "")]) +@patch("resource_processor.vmss_porter.runner.service_bus_message_generator", return_value="test_message") +async def 
test_invoke_porter_action_upgrade_failure_install_success(mock_service_bus_message_generator, mock_run_porter, mock_build_porter_command, mock_service_bus_client): + """Test invoking a porter action with upgrade failure and install success.""" + mock_sb_client = AsyncMock(spec=ServiceBusClient) + mock_sb_sender = AsyncMock() + mock_sb_client.get_queue_sender.return_value = mock_sb_sender + + config = {"deployment_status_queue": "test_queue"} + msg_body = {"id": "test_id", "action": "upgrade", "stepId": "test_step_id", "operationId": "test_operation_id"} + + result = await invoke_porter_action(msg_body, mock_sb_client, config) + + assert result is True + mock_sb_sender.send_messages.assert_called() + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command", return_value=["porter install"]) +@patch("resource_processor.vmss_porter.runner.run_porter", side_effect=[(1, "", "could not find installation"), (1, "", "installation failed")]) +@patch("resource_processor.vmss_porter.runner.service_bus_message_generator", return_value="test_message") +async def test_invoke_porter_action_upgrade_failure_install_failure(mock_service_bus_message_generator, mock_run_porter, mock_build_porter_command, mock_service_bus_client): + """Test invoking a porter action with upgrade and install failure.""" + mock_sb_client = AsyncMock(spec=ServiceBusClient) + mock_sb_sender = AsyncMock() + mock_sb_client.get_queue_sender.return_value = mock_sb_sender + + config = {"deployment_status_queue": "test_queue"} + msg_body = {"id": "test_id", "action": "upgrade", "stepId": "test_step_id", "operationId": "test_operation_id"} + + result = await invoke_porter_action(msg_body, mock_sb_client, config) + + assert result is False + mock_sb_sender.send_messages.assert_called() + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command", return_value=["porter install"]) +@patch("resource_processor.vmss_porter.runner.run_porter", return_value=(1, "", "could not find installation")) +@patch("resource_processor.vmss_porter.runner.service_bus_message_generator", return_value="test_message") +async def test_invoke_porter_action_uninstall_failure(mock_service_bus_message_generator, mock_run_porter, mock_build_porter_command, mock_service_bus_client): + """Test invoking a porter action with uninstall failure.""" + mock_sb_client = AsyncMock(spec=ServiceBusClient) + mock_sb_sender = AsyncMock() + mock_sb_client.get_queue_sender.return_value = mock_sb_sender + + config = {"deployment_status_queue": "test_queue"} + msg_body = {"id": "test_id", "action": "uninstall", "stepId": "test_step_id", "operationId": "test_operation_id"} + + result = await invoke_porter_action(msg_body, mock_sb_client, config) + + assert result is True + mock_sb_sender.send_messages.assert_called() + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command", return_value=["porter custom-action"]) +@patch("resource_processor.vmss_porter.runner.run_porter", return_value=(0, "stdout", "stderr")) +@patch("resource_processor.vmss_porter.runner.service_bus_message_generator", return_value="test_message") +async def test_invoke_porter_action_custom_action(mock_service_bus_message_generator, mock_run_porter, mock_build_porter_command, mock_service_bus_client): + """Test invoking a porter custom action.""" + mock_sb_client = AsyncMock(spec=ServiceBusClient) + mock_sb_sender = AsyncMock() + mock_sb_client.get_queue_sender.return_value = mock_sb_sender + + config = 
{"deployment_status_queue": "test_queue"} + msg_body = {"id": "test_id", "action": "custom-action", "stepId": "test_step_id", "operationId": "test_operation_id"} + + result = await invoke_porter_action(msg_body, mock_sb_client, config) + + assert result is True + mock_sb_sender.send_messages.assert_called() + + +@pytest.mark.asyncio +@patch("resource_processor.vmss_porter.runner.build_porter_command_for_outputs", return_value=["porter installations output list"]) +@patch("resource_processor.vmss_porter.runner.run_porter", return_value=(0, json.dumps([{"name": "output1", "value": "value1"}]), "stderr")) +async def test_get_porter_outputs(mock_run_porter, mock_build_porter_command_for_outputs): + """Test getting porter outputs.""" + config = {} + msg_body = {"id": "test_id", "action": "install"} + + success, outputs = await get_porter_outputs(msg_body, config) + + assert success is True + assert outputs == [{"name": "output1", "value": "value1"}] + + +@pytest.mark.asyncio +@patch("asyncio.sleep", new_callable=AsyncMock) +async def test_check_runners(_): + """Test checking runners.""" + mock_process = Mock() + mock_process.is_alive.return_value = False + processes = [mock_process] + mock_httpserver = AsyncMock() + + run_once = Mock(side_effect=[True, False]) + + await check_runners(processes, mock_httpserver, keep_running=run_once) + mock_httpserver.kill.assert_called_once() diff --git a/resource_processor/vmss_porter/runner.py b/resource_processor/vmss_porter/runner.py index 3de4ac06fe..6f6d6e21e1 100644 --- a/resource_processor/vmss_porter/runner.py +++ b/resource_processor/vmss_porter/runner.py @@ -4,14 +4,13 @@ import asyncio import logging import sys -from resources.commands import azure_acr_login_command, azure_login_command, build_porter_command, build_porter_command_for_outputs, apply_porter_credentials_sets_command +from helpers.commands import azure_acr_login_command, azure_login_command, build_porter_command, build_porter_command_for_outputs, apply_porter_credentials_sets_command from shared.config import get_config -from resources.helpers import get_installation_id -from resources.httpserver import start_server +from helpers.httpserver import start_server from shared.logging import initialize_logging, logger, shell_output_logger, tracer from shared.config import VERSION -from resources import statuses +from helpers import statuses from contextlib import asynccontextmanager from azure.servicebus import ServiceBusMessage, NEXT_AVAILABLE_SESSION from azure.servicebus.exceptions import OperationTimeoutError, ServiceBusConnectionError @@ -38,7 +37,7 @@ async def default_credentials(msi_id): await credential.close() -async def receive_message(service_bus_client, config: dict): +async def receive_message(service_bus_client, config: dict, keep_running=lambda: True): """ This method is run per process. Each process will connect to service bus and try to establish a session. If messages are there, the process will continue to receive all the messages associated with that session. 
@@ -46,7 +45,7 @@ async def receive_message(service_bus_client, config: dict): """ q_name = config["resource_request_queue"] - while True: + while keep_running(): try: logger.info("Looking for new session...") # max_wait_time=1 -> don't hold the session open after processing of the message has finished @@ -94,6 +93,7 @@ async def receive_message(service_bus_client, config: dict): except Exception: # Catch all other exceptions, log them via .exception to get the stack trace, sleep, and reconnect + logger.exception("Unknown exception. Will retry...") @@ -135,7 +135,7 @@ def service_bus_message_generator(sb_message: dict, status: str, deployment_mess """ Generate a resource request message """ - installation_id = get_installation_id(sb_message) + installation_id = sb_message["id"] message_dict = { "operationId": sb_message["operationId"], "stepId": sb_message["stepId"], @@ -156,7 +156,7 @@ async def invoke_porter_action(msg_body: dict, sb_client: ServiceBusClient, conf Handle resource message by invoking specified porter action (i.e. install, uninstall) """ - installation_id = get_installation_id(msg_body) + installation_id = msg_body["id"] action = msg_body["action"] logger.info(f"{action} action starting for {installation_id}...") sb_sender = sb_client.get_queue_sender(queue_name=config["deployment_status_queue"]) @@ -173,13 +173,25 @@ async def invoke_porter_action(msg_body: dict, sb_client: ServiceBusClient, conf logger.debug("Starting to run porter execution command...") returncode, _, err = await run_porter(porter_command, config) logger.debug("Finished running porter execution command.") - action_completed_without_error = True + + action_completed_without_error = False + + if returncode == 0: + action_completed_without_error = True # Handle command output if returncode != 0 and err is not None: error_message = "Error message: " + " ".join(err.split('\n')) + "; Command executed: " + " ".join(porter_command) action_completed_without_error = False + if "upgrade" == action and ("could not find installation" in err or "The installation cannot be upgraded, because it is not installed." in err): + logger.warning("Upgrade failed, attempting install...") + msg_body['action'] = "install" + porter_command = await build_porter_command(config, msg_body, False) + returncode, _, err = await run_porter(porter_command, config) + if returncode == 0: + action_completed_without_error = True + if "uninstall" == action and "could not find installation" in err: logger.warning("The installation doesn't exist. 
Treating as a successful action to allow the flow to proceed.") action_completed_without_error = True @@ -227,7 +239,8 @@ async def get_porter_outputs(msg_body: dict, config: dict): if returncode != 0: error_message = "Error context message = " + " ".join(err.split('\n')) - logger.info(f"{get_installation_id(msg_body)}: Failed to get outputs with error = {error_message}") + installation_id = msg_body["id"] + logger.info(f"{installation_id}: Failed to get outputs with error = {error_message}") return False, {} else: outputs_json = {} @@ -253,10 +266,10 @@ async def runner(process_number: int, config: dict): await receive_message(service_bus_client, config) -async def check_runners(processes: list, httpserver: Process): +async def check_runners(processes: list, httpserver: Process, keep_running=lambda: True): logger.info("Starting runners check...") - while True: + while keep_running(): await asyncio.sleep(30) if all(not process.is_alive() for process in processes): logger.error("All runner processes have failed!") From 0715eca9dfc0673a9a948d192233a0e3e0f76df7 Mon Sep 17 00:00:00 2001 From: Yuval Yaron <43217306+yuvalyaron@users.noreply.github.com> Date: Tue, 11 Feb 2025 18:02:24 +0200 Subject: [PATCH 06/18] Add moved block to handle firewall public IP count migration (#4357) Adds moved block to handle resource address change when adding count to the firewall's public IP resource, preventing unwanted recreation of the IP address --- templates/shared_services/firewall/porter.yaml | 2 +- templates/shared_services/firewall/terraform/firewall.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/shared_services/firewall/porter.yaml b/templates/shared_services/firewall/porter.yaml index d5759218a7..19e820c4d7 100644 --- a/templates/shared_services/firewall/porter.yaml +++ b/templates/shared_services/firewall/porter.yaml @@ -1,7 +1,7 @@ --- schemaVersion: 1.0.0 name: tre-shared-service-firewall -version: 1.3.1 +version: 1.3.2 description: "An Azure TRE Firewall shared service" dockerfile: Dockerfile.tmpl registry: azuretre diff --git a/templates/shared_services/firewall/terraform/firewall.tf b/templates/shared_services/firewall/terraform/firewall.tf index 3f42237a0f..6f4dedc816 100644 --- a/templates/shared_services/firewall/terraform/firewall.tf +++ b/templates/shared_services/firewall/terraform/firewall.tf @@ -11,7 +11,7 @@ resource "azurerm_public_ip" "fwtransit" { } moved { - from = azurerm_public_ip.fwpip + from = azurerm_public_ip.fwtransit to = azurerm_public_ip.fwtransit[0] } From 78f4a1351625ce9f1f6bd4fcc532e396bf989338 Mon Sep 17 00:00:00 2001 From: Ron Shakutai <58519179+ShakutaiGit@users.noreply.github.com> Date: Wed, 12 Feb 2025 01:57:30 +0200 Subject: [PATCH 07/18] Update Core Subnets Creation and upgrade core Terraform to 4.14.0 (#4255) --- CHANGELOG.md | 3 + core/terraform/.terraform.lock.hcl | 29 ++- core/terraform/cosmos_mongo.tf | 2 +- core/terraform/locals.tf | 15 +- core/terraform/main.tf | 2 +- core/terraform/migrate.sh | 110 ++++++--- core/terraform/network/.terraform.lock.hcl | 22 ++ core/terraform/network/locals.tf | 2 + core/terraform/network/main.tf | 2 +- core/terraform/network/network.tf | 210 ++++++++---------- .../network/network_security_groups.tf | 55 ----- core/terraform/network/outputs.tf | 20 +- core/terraform/statestore.tf | 3 +- core/version.txt | 2 +- 14 files changed, 231 insertions(+), 246 deletions(-) create mode 100644 core/terraform/network/.terraform.lock.hcl diff --git a/CHANGELOG.md b/CHANGELOG.md index 
9cf9862269..879a3f47e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ ENHANCEMENTS: * Allow workspace App Service Plan SKU to be updated ([#4331](https://github.com/microsoft/AzureTRE/issues/4331)) * Remove public IP from TRE's firewall when forced tunneling is configured ([#4346](https://github.com/microsoft/AzureTRE/pull/4346)) +* Upgrade AzureRM Terraform provider from `3.117.0` to `4.14.0`. ([[#4255](https://github.com/microsoft/AzureTRE/pull/4255/)]) +* Subnet definitions are now inline in the `azurerm_virtual_network` resource, and NSG associations are set using `security_group` in each subnet block (no separate `azurerm_subnet_network_security_group_association` needed). ([[#4255](https://github.com/microsoft/AzureTRE/pull/4255/)]) BUG FIXES: * Fix upgrade when porter install has failed ([#4338](https://github.com/microsoft/AzureTRE/pull/4338)) @@ -78,6 +80,7 @@ BUG FIXES: * Recreate tre_output.json if empty. ([[#4292](https://github.com/microsoft/AzureTRE/issues/4292)]) * Ensure R directory is present before attempting to update package mirror URL ([#4332](https://github.com/microsoft/AzureTRE/pull/4332)) + COMPONENTS: | name | version | diff --git a/core/terraform/.terraform.lock.hcl b/core/terraform/.terraform.lock.hcl index 1c20359910..41d8da1a19 100644 --- a/core/terraform/.terraform.lock.hcl +++ b/core/terraform/.terraform.lock.hcl @@ -6,7 +6,6 @@ provider "registry.terraform.io/azure/azapi" { constraints = ">= 1.15.0, ~> 1.15.0" hashes = [ "h1:Y7ruMuPh8UJRTRl4rm+cdpGtmURx2taqiuqfYaH3o48=", - "h1:gIOgxVmFSxHrR+XOzgUEA+ybOmp8kxZlZH3eYeB/eFI=", "zh:0627a8bc77254debc25dc0c7b62e055138217c97b03221e593c3c56dc7550671", "zh:2fe045f07070ef75d0bec4b0595a74c14394daa838ddb964e2fd23cc98c40c34", "zh:343009f39c957883b2c06145a5954e524c70f93585f943f1ea3d28ef6995d0d0", @@ -23,22 +22,22 @@ provider "registry.terraform.io/azure/azapi" { } provider "registry.terraform.io/hashicorp/azurerm" { - version = "3.117.0" - constraints = ">= 3.117.0, 3.117.0" + version = "4.14.0" + constraints = ">= 3.117.0, 4.14.0" hashes = [ - "h1:Ynfg+Iy7x6K8M6W1AhqXCe3wkoiqIQhROlca7C3KC3w=", - "zh:2e25f47492366821a786762369f0e0921cc9452d64bfd5075f6fdfcf1a9c6d70", - "zh:41eb34f2f7469bf3eb1019dfb0e7fc28256f809824016f4f8b9d691bf473b2ac", - "zh:48bb9c87b3d928da1abc1d3db75453c9725de4674c612daf3800160cc7145d30", - "zh:5d6b0de0bbd78943fcc65c53944ef4496329e247f434c6eab86ed051c5cea67b", - "zh:78c9f6fdb1206a89cf0e6706b4f46178169a93b6c964a4cad8a321058ccbd9b4", - "zh:793b702c352589d4360b580d4a1cf654a7439d2ad6bdb7bfea91de07bc4b0fac", - "zh:7ed687ff0a5509463a592f97431863574fe5cc80a34e395be06766215b8c6285", - "zh:955ba18789bd15592824eb426a8d0f38595bd09fffc6939c1c58933489c1a71e", - "zh:bf5949a55be0714cd9c8815d472eae4baa48ba06d0f6bf2b96775869acda8a54", - "zh:da5d31f635abd2c645ffc76d6176d73f646128e73720cc368247cc424975c127", - "zh:eed5a66d59883c9c56729b0a964a2b60d758ea7489ef3e920a6fbd48518ce5f5", + "h1:FYZ9qh8i3X2gDmUTe1jJ/VzdSyjGjVmhBzv2R8D6CBo=", + "zh:05aaea16fc5f27b14d9fbad81654edf0638949ed3585576b2219c76a2bee095a", + "zh:065ce6ed16ba3fa7efcf77888ea582aead54e6a28f184c6701b73d71edd64bb0", + "zh:3c0cd17c249d18aa2e0120acb5f0c14810725158b379a67fec1331110e7c50df", + "zh:5a3ba3ffb2f1ce519fe3bf84a7296aa5862c437c70c62f0b0a5293bea9f2d01c", + "zh:7a8e9d72fa2714f4d567270b1761d4b4e788de7c15dada7db0cf0e29933185a2", + "zh:a11e190073f31c1238c15af29b9162e0f4564f6b0cd0310a3fa94102738450dc", + "zh:a5c004114410cc6dcb8fed584c9f3b84283b58025b0073a7e88d2bdb27840dfa", + "zh:a674a41db118e244eda7591e455d2ec338626664e0856e4125e909eb038f78db", + 
"zh:b5139010e4cbb2cb1a27c775610593c1c8063d3a7c82b00a65006509c434df2f", + "zh:cbb031223ccd8b099ac4d19b92641142f330b90f2fc6452843e445bae28f832c", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f7e7db1b94082a4ac3d4af3dabe7bbd335e1679305bf8e29d011f0ee440724ca", ] } diff --git a/core/terraform/cosmos_mongo.tf b/core/terraform/cosmos_mongo.tf index 65812cc8f1..6bb4ec4594 100644 --- a/core/terraform/cosmos_mongo.tf +++ b/core/terraform/cosmos_mongo.tf @@ -6,7 +6,7 @@ resource "azurerm_cosmosdb_account" "mongo" { kind = "MongoDB" automatic_failover_enabled = false mongo_server_version = 4.2 - ip_range_filter = "${local.azure_portal_cosmos_ips}${var.enable_local_debugging ? ",${local.myip}" : ""}" + ip_range_filter = local.cosmos_ip_filter_set capabilities { name = "EnableServerless" diff --git a/core/terraform/locals.tf b/core/terraform/locals.tf index 22d327f96f..15f066ae7b 100644 --- a/core/terraform/locals.tf +++ b/core/terraform/locals.tf @@ -14,7 +14,20 @@ locals { docker_registry_server = data.azurerm_container_registry.mgmt_acr.login_server # https://learn.microsoft.com/en-us/azure/cosmos-db/how-to-configure-firewall#allow-requests-from-the-azure-portal - azure_portal_cosmos_ips = "104.42.195.92,40.76.54.131,52.176.6.30,52.169.50.45,52.187.184.26" + + azure_portal_cosmos_ips_list = [ + "104.42.195.92", + "40.76.54.131", + "52.176.6.30", + "52.169.50.45", + "52.187.184.26" + ] + + cosmos_ip_filter_set = toset( + var.enable_local_debugging + ? concat(local.azure_portal_cosmos_ips_list, [local.myip]) + : local.azure_portal_cosmos_ips_list + ) # we define some zones in core despite not used by the core infra because # it's the easier way to make them available to other services in the system. diff --git a/core/terraform/main.tf b/core/terraform/main.tf index 4d6d910257..b231621603 100644 --- a/core/terraform/main.tf +++ b/core/terraform/main.tf @@ -3,7 +3,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "=3.117.0" + version = "=4.14.0" } random = { source = "hashicorp/random" diff --git a/core/terraform/migrate.sh b/core/terraform/migrate.sh index 3c88e2ed2d..5f64abeb15 100755 --- a/core/terraform/migrate.sh +++ b/core/terraform/migrate.sh @@ -5,16 +5,25 @@ set -o pipefail set -o nounset # set -o xtrace -# Configure AzureRM provider to user Azure AD to connect to storage accounts +get_resource_id() { + local json_data="$1" + local resource_addr="$2" + echo "$json_data" | jq -r --arg addr "$resource_addr" ' + def walk_resources: + (.resources[]?), + (.child_modules[]? | walk_resources); + .values.root_module | walk_resources | select(.address==$addr) | .values.id + ' +} + +# Configure AzureRM provider to use Azure AD to connect to storage accounts export ARM_STORAGE_USE_AZUREAD=true -# Configure AzureRM backend to user Azure AD to connect to storage accounts +# Configure AzureRM backend to use Azure AD to connect to storage accounts export ARM_USE_AZUREAD=true export ARM_USE_OIDC=true -# terraform_wrapper_path="../../devops/scripts/terraform_wrapper.sh" - -# This variables are loaded in for us +# These variables are loaded in for us # shellcheck disable=SC2154 terraform init -input=false -backend=true -reconfigure \ -backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name}" \ @@ -24,41 +33,68 @@ terraform init -input=false -backend=true -reconfigure \ echo "*** Migrating TF Resources... 
***" +terraform refresh +# get TF state in JSON terraform_show_json=$(terraform show -json) -# Remove cnab-state legacy state path form state. Needs to be run before refresh, as refresh will fail. -state_store_legacy_path=$(echo "${terraform_show_json}" \ - | jq 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_storage_share.storage_state_path") | .values.id') - -if [ -n "${state_store_legacy_path}" ]; then - echo -e "\n\e[96mRemoving legacy state path from TF state\e[0m..." - terraform state rm azurerm_storage_share.storage_state_path -fi - -# terraform show might fail if provider schema has changed. Since we don't call apply at this stage a refresh is needed -terraform refresh +# List of resource addresses to remove. +declare -a RESOURCES_TO_REMOVE=( + "module.network.azurerm_subnet_network_security_group_association.bastion" + "module.network.azurerm_subnet_network_security_group_association.app_gw" + "module.network.azurerm_subnet_network_security_group_association.shared" + "module.network.azurerm_subnet_network_security_group_association.web_app" + "module.network.azurerm_subnet_network_security_group_association.resource_processor" + "module.network.azurerm_subnet_network_security_group_association.airlock_processor" + "module.network.azurerm_subnet_network_security_group_association.airlock_notification" + "module.network.azurerm_subnet_network_security_group_association.airlock_storage" + "module.network.azurerm_subnet_network_security_group_association.airlock_events" + "module.network.azurerm_subnet.bastion" + "module.network.azurerm_subnet.azure_firewall" + "module.network.azurerm_subnet.app_gw" + "module.network.azurerm_subnet.web_app" + "module.network.azurerm_subnet.shared" + "module.network.azurerm_subnet.resource_processor" + "module.network.azurerm_subnet.airlock_processor" + "module.network.azurerm_subnet.airlock_notification" + "module.network.azurerm_subnet.airlock_storage" + "module.network.azurerm_subnet.airlock_events" + "module.network.azurerm_subnet.firewall_management" +) +vnet_address="module.network.azurerm_virtual_network.core" -# 1. Check we have a root_module in state -# 2. Grab the Resource ID -# 3. Delete the old resource from state -# 4. 
Import the new resource type in using the existing Azure Resource ID

+# Check if migration is needed
+migration_needed=0
+for resource in "${RESOURCES_TO_REMOVE[@]}"; do
+  resource_id=$(get_resource_id "${terraform_show_json}" "$resource")
+  if [ -n "$resource_id" ] && [ "$resource_id" != "null" ]; then
+    migration_needed=1
+    break
+  fi
+done

-terraform_show_json=$(terraform show -json)
+# Remove old resources
+if [ "$migration_needed" -eq 1 ]; then
+  for resource in "${RESOURCES_TO_REMOVE[@]}"; do
+    resource_id=$(get_resource_id "${terraform_show_json}" "$resource")
+    if [ -n "$resource_id" ] && [ "$resource_id" != "null" ]; then
+      terraform state rm "$resource"
+    else
+      echo "Expected resource not found in state, skipping removal: ${resource}"
+    fi
+  done

-# example migration
-# # azurerm_app_service_plan -> azurerm_service_plan
-# core_app_service_plan_id=$(echo "${terraform_show_json}" \
-#   | jq -r 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_app_service_plan.core") | .values.id')
-# if [ -n "${core_app_service_plan_id}" ]; then
-#   echo "Migrating ${core_app_service_plan_id}"
-#   terraform state rm azurerm_app_service_plan.core
-#   if [[ $(az resource list --query "[?id=='${core_app_service_plan_id}'] | length(@)") == 0 ]];
-#   then
-#     echo "The resource doesn't exist on Azure. Skipping importing it back to state."
-#   else
-#     terraform import azurerm_service_plan.core "${core_app_service_plan_id}"
-#   fi
-# fi
-
-echo "*** Migration is done. ***"
+  # Remove and re-import the VNet
+  vnet_address="module.network.azurerm_virtual_network.core"
+  vnet_id=$(get_resource_id "${terraform_show_json}" "$vnet_address")
+  if [ -n "${vnet_id}" ] && [ "${vnet_id}" != "null" ]; then
+    terraform state rm "${vnet_address}"
+    terraform import "${vnet_address}" "${vnet_id}"
+  else
+    echo "VNet resource not found in state: ${vnet_address}"
+  fi
+  echo "*** Migration Done ***"
+else
+  echo "No old resources found in the state, skipping migration."
+  echo "*** Migration Skipped ***"
+fi
diff --git a/core/terraform/network/.terraform.lock.hcl b/core/terraform/network/.terraform.lock.hcl
new file mode 100644
index 0000000000..ec690305f6
--- /dev/null
+++ b/core/terraform/network/.terraform.lock.hcl
@@ -0,0 +1,22 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/azurerm" { + version = "4.14.0" + constraints = "4.14.0" + hashes = [ + "h1:FYZ9qh8i3X2gDmUTe1jJ/VzdSyjGjVmhBzv2R8D6CBo=", + "zh:05aaea16fc5f27b14d9fbad81654edf0638949ed3585576b2219c76a2bee095a", + "zh:065ce6ed16ba3fa7efcf77888ea582aead54e6a28f184c6701b73d71edd64bb0", + "zh:3c0cd17c249d18aa2e0120acb5f0c14810725158b379a67fec1331110e7c50df", + "zh:5a3ba3ffb2f1ce519fe3bf84a7296aa5862c437c70c62f0b0a5293bea9f2d01c", + "zh:7a8e9d72fa2714f4d567270b1761d4b4e788de7c15dada7db0cf0e29933185a2", + "zh:a11e190073f31c1238c15af29b9162e0f4564f6b0cd0310a3fa94102738450dc", + "zh:a5c004114410cc6dcb8fed584c9f3b84283b58025b0073a7e88d2bdb27840dfa", + "zh:a674a41db118e244eda7591e455d2ec338626664e0856e4125e909eb038f78db", + "zh:b5139010e4cbb2cb1a27c775610593c1c8063d3a7c82b00a65006509c434df2f", + "zh:cbb031223ccd8b099ac4d19b92641142f330b90f2fc6452843e445bae28f832c", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f7e7db1b94082a4ac3d4af3dabe7bbd335e1679305bf8e29d011f0ee440724ca", + ] +} diff --git a/core/terraform/network/locals.tf b/core/terraform/network/locals.tf index aaa2aea7d1..82ae26fb2d 100644 --- a/core/terraform/network/locals.tf +++ b/core/terraform/network/locals.tf @@ -32,4 +32,6 @@ locals { "privatelink.queue.core.windows.net", "privatelink.table.core.windows.net" ]) + + subnet_ids_map = { for subnet in azurerm_virtual_network.core.subnet : subnet.name => subnet.id } } diff --git a/core/terraform/network/main.tf b/core/terraform/network/main.tf index a4eb095f9c..5cced47bb0 100644 --- a/core/terraform/network/main.tf +++ b/core/terraform/network/main.tf @@ -3,7 +3,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = ">= 3.117" + version = ">= 4.14.0" } } } diff --git a/core/terraform/network/network.tf b/core/terraform/network/network.tf index db71fe554f..a511365326 100644 --- a/core/terraform/network/network.tf +++ b/core/terraform/network/network.tf @@ -5,146 +5,112 @@ resource "azurerm_virtual_network" "core" { address_space = [var.core_address_space] tags = local.tre_core_tags lifecycle { ignore_changes = [tags] } -} -resource "azurerm_subnet" "bastion" { - name = "AzureBastionSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.bastion_subnet_address_prefix] -} + subnet { + name = "AzureBastionSubnet" + address_prefixes = [local.bastion_subnet_address_prefix] + security_group = azurerm_network_security_group.bastion.id + } -resource "azurerm_subnet" "azure_firewall" { - name = "AzureFirewallSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.firewall_subnet_address_space] - depends_on = [azurerm_subnet.bastion] -} + subnet { + name = "AzureFirewallSubnet" + address_prefixes = [local.firewall_subnet_address_space] + } -resource "azurerm_subnet" "app_gw" { - name = "AppGwSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.app_gw_subnet_address_prefix] - private_endpoint_network_policies = "Disabled" - private_link_service_network_policies_enabled = true - depends_on = [azurerm_subnet.azure_firewall] -} + subnet { + name = "AppGwSubnet" + address_prefixes = [local.app_gw_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + private_link_service_network_policies_enabled = true + security_group = 
azurerm_network_security_group.app_gw.id + } -resource "azurerm_subnet" "web_app" { - name = "WebAppSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.web_app_subnet_address_prefix] - private_endpoint_network_policies = "Disabled" - private_link_service_network_policies_enabled = true - depends_on = [azurerm_subnet.app_gw] - - delegation { - name = "delegation" - - service_delegation { - name = "Microsoft.Web/serverFarms" - actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + subnet { + name = "WebAppSubnet" + address_prefixes = [local.web_app_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + private_link_service_network_policies_enabled = true + security_group = azurerm_network_security_group.default_rules.id + + delegation { + name = "delegation" + + service_delegation { + name = "Microsoft.Web/serverFarms" + actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + } } } -} -resource "azurerm_subnet" "shared" { - name = "SharedSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.shared_services_subnet_address_prefix] - # notice that private endpoints do not adhere to NSG rules - private_endpoint_network_policies = "Disabled" - depends_on = [azurerm_subnet.web_app] -} + subnet { + name = "SharedSubnet" + address_prefixes = [local.shared_services_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + security_group = azurerm_network_security_group.default_rules.id + } -resource "azurerm_subnet" "resource_processor" { - name = "ResourceProcessorSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.resource_processor_subnet_address_prefix] - # notice that private endpoints do not adhere to NSG rules - private_endpoint_network_policies = "Disabled" - depends_on = [azurerm_subnet.shared] -} + subnet { + name = "ResourceProcessorSubnet" + address_prefixes = [local.resource_processor_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + security_group = azurerm_network_security_group.default_rules.id + } + + subnet { + name = "AirlockProcessorSubnet" + address_prefixes = [local.airlock_processor_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + security_group = azurerm_network_security_group.default_rules.id -resource "azurerm_subnet" "airlock_processor" { - name = "AirlockProcessorSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.airlock_processor_subnet_address_prefix] - # notice that private endpoints do not adhere to NSG rules - private_endpoint_network_policies = "Disabled" - depends_on = [azurerm_subnet.resource_processor] - - delegation { - name = "delegation" - - service_delegation { - name = "Microsoft.Web/serverFarms" - actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + delegation { + name = "delegation" + + service_delegation { + name = "Microsoft.Web/serverFarms" + actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + } } + + service_endpoints = ["Microsoft.Storage"] } - # Todo: needed as we want to open the fw for this subnet in some of the airlock storages (export inprogress) - # https://github.com/microsoft/AzureTRE/issues/2098 - service_endpoints = ["Microsoft.Storage"] -} + subnet { + name = 
"AirlockNotifiactionSubnet" + address_prefixes = [local.airlock_notifications_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + security_group = azurerm_network_security_group.default_rules.id + + delegation { + name = "delegation" -resource "azurerm_subnet" "airlock_notification" { - name = "AirlockNotifiactionSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.airlock_notifications_subnet_address_prefix] - # notice that private endpoints do not adhere to NSG rules - private_endpoint_network_policies = "Disabled" - depends_on = [azurerm_subnet.airlock_processor] - - delegation { - name = "delegation" - - service_delegation { - name = "Microsoft.Web/serverFarms" - actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + service_delegation { + name = "Microsoft.Web/serverFarms" + actions = ["Microsoft.Network/virtualNetworks/subnets/action"] + } } + service_endpoints = ["Microsoft.ServiceBus"] } - service_endpoints = ["Microsoft.ServiceBus"] -} -resource "azurerm_subnet" "airlock_storage" { - name = "AirlockStorageSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.airlock_storage_subnet_address_prefix] - # notice that private endpoints do not adhere to NSG rules - private_endpoint_network_policies = "Disabled" - depends_on = [azurerm_subnet.airlock_notification] -} + subnet { + name = "AirlockStorageSubnet" + address_prefixes = [local.airlock_storage_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + security_group = azurerm_network_security_group.default_rules.id + } -resource "azurerm_subnet" "airlock_events" { - name = "AirlockEventsSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.airlock_events_subnet_address_prefix] - # notice that private endpoints do not adhere to NSG rules - private_endpoint_network_policies = "Disabled" - depends_on = [azurerm_subnet.airlock_storage] - - # Eventgrid CAN'T send messages over private endpoints, hence we need to allow service endpoints to the service bus - # We are using service endpoints + managed identity to send these messaages - # https://docs.microsoft.com/en-us/azure/event-grid/consume-private-endpoints - service_endpoints = ["Microsoft.ServiceBus"] -} + subnet { + name = "AirlockEventsSubnet" + address_prefixes = [local.airlock_events_subnet_address_prefix] + private_endpoint_network_policies = "Disabled" + security_group = azurerm_network_security_group.default_rules.id -resource "azurerm_subnet" "firewall_management" { - name = "AzureFirewallManagementSubnet" - virtual_network_name = azurerm_virtual_network.core.name - resource_group_name = var.resource_group_name - address_prefixes = [local.firewall_management_subnet_address_prefix] - depends_on = [azurerm_subnet.airlock_events] + service_endpoints = ["Microsoft.ServiceBus"] + } + + subnet { + name = "AzureFirewallManagementSubnet" + address_prefixes = [local.firewall_management_subnet_address_prefix] + } } resource "azurerm_ip_group" "resource_processor" { diff --git a/core/terraform/network/network_security_groups.tf b/core/terraform/network/network_security_groups.tf index 50accf846b..34371dc145 100644 --- a/core/terraform/network/network_security_groups.tf +++ b/core/terraform/network/network_security_groups.tf @@ -105,13 +105,6 @@ resource 
"azurerm_network_security_group" "bastion" { lifecycle { ignore_changes = [tags] } } -resource "azurerm_subnet_network_security_group_association" "bastion" { - subnet_id = azurerm_subnet.bastion.id - network_security_group_id = azurerm_network_security_group.bastion.id - # depend on the last subnet we created in the vnet - depends_on = [azurerm_subnet.firewall_management] -} - # Network security group for Application Gateway # See https://docs.microsoft.com/azure/application-gateway/configuration-infrastructure#network-security-groups resource "azurerm_network_security_group" "app_gw" { @@ -147,12 +140,6 @@ resource "azurerm_network_security_group" "app_gw" { lifecycle { ignore_changes = [tags] } } -resource "azurerm_subnet_network_security_group_association" "app_gw" { - subnet_id = azurerm_subnet.app_gw.id - network_security_group_id = azurerm_network_security_group.app_gw.id - depends_on = [azurerm_subnet_network_security_group_association.bastion] -} - # Network security group with only default security rules # See https://docs.microsoft.com/azure/virtual-network/network-security-groups-overview#default-security-rules resource "azurerm_network_security_group" "default_rules" { @@ -163,45 +150,3 @@ resource "azurerm_network_security_group" "default_rules" { lifecycle { ignore_changes = [tags] } } - -resource "azurerm_subnet_network_security_group_association" "shared" { - subnet_id = azurerm_subnet.shared.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.app_gw] -} - -resource "azurerm_subnet_network_security_group_association" "web_app" { - subnet_id = azurerm_subnet.web_app.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.shared] -} - -resource "azurerm_subnet_network_security_group_association" "resource_processor" { - subnet_id = azurerm_subnet.resource_processor.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.web_app] -} - -resource "azurerm_subnet_network_security_group_association" "airlock_processor" { - subnet_id = azurerm_subnet.airlock_processor.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.resource_processor] -} - -resource "azurerm_subnet_network_security_group_association" "airlock_storage" { - subnet_id = azurerm_subnet.airlock_storage.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.airlock_processor] -} - -resource "azurerm_subnet_network_security_group_association" "airlock_events" { - subnet_id = azurerm_subnet.airlock_events.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.airlock_storage] -} - -resource "azurerm_subnet_network_security_group_association" "airlock_notification" { - subnet_id = azurerm_subnet.airlock_notification.id - network_security_group_id = azurerm_network_security_group.default_rules.id - depends_on = [azurerm_subnet_network_security_group_association.airlock_events] -} diff --git a/core/terraform/network/outputs.tf b/core/terraform/network/outputs.tf index 3e0aab407d..e2a7fba134 100644 --- a/core/terraform/network/outputs.tf +++ b/core/terraform/network/outputs.tf 
@@ -3,43 +3,43 @@ output "core_vnet_id" { } output "bastion_subnet_id" { - value = azurerm_subnet.bastion.id + value = local.subnet_ids_map["AzureBastionSubnet"] } output "azure_firewall_subnet_id" { - value = azurerm_subnet.azure_firewall.id + value = local.subnet_ids_map["AzureFirewallSubnet"] } output "app_gw_subnet_id" { - value = azurerm_subnet.app_gw.id + value = local.subnet_ids_map["AppGwSubnet"] } output "web_app_subnet_id" { - value = azurerm_subnet.web_app.id + value = local.subnet_ids_map["WebAppSubnet"] } output "shared_subnet_id" { - value = azurerm_subnet.shared.id + value = local.subnet_ids_map["SharedSubnet"] } output "airlock_processor_subnet_id" { - value = azurerm_subnet.airlock_processor.id + value = local.subnet_ids_map["AirlockProcessorSubnet"] } output "airlock_storage_subnet_id" { - value = azurerm_subnet.airlock_storage.id + value = local.subnet_ids_map["AirlockStorageSubnet"] } output "airlock_events_subnet_id" { - value = azurerm_subnet.airlock_events.id + value = local.subnet_ids_map["AirlockEventsSubnet"] } output "resource_processor_subnet_id" { - value = azurerm_subnet.resource_processor.id + value = local.subnet_ids_map["ResourceProcessorSubnet"] } output "airlock_notification_subnet_id" { - value = azurerm_subnet.airlock_notification.id + value = local.subnet_ids_map["AirlockNotifiactionSubnet"] } # DNS Zones diff --git a/core/terraform/statestore.tf b/core/terraform/statestore.tf index 66748fda58..ec8950084e 100644 --- a/core/terraform/statestore.tf +++ b/core/terraform/statestore.tf @@ -5,10 +5,9 @@ resource "azurerm_cosmosdb_account" "tre_db_account" { offer_type = "Standard" kind = "GlobalDocumentDB" automatic_failover_enabled = false - ip_range_filter = "${local.azure_portal_cosmos_ips}${var.enable_local_debugging ? ",${local.myip}" : ""}" + ip_range_filter = local.cosmos_ip_filter_set local_authentication_disabled = true tags = local.tre_core_tags - dynamic "capabilities" { # We can't change an existing cosmos for_each = var.is_cosmos_defined_throughput ? 
[] : [1] diff --git a/core/version.txt b/core/version.txt index 836582489b..ea370a8e55 100644 --- a/core/version.txt +++ b/core/version.txt @@ -1 +1 @@ -__version__ = "0.11.23" +__version__ = "0.12.0" From 72e98c370ee35b0f7c1c599460b2172c578d9e90 Mon Sep 17 00:00:00 2001 From: Marcus Robinson Date: Wed, 12 Feb 2025 15:47:24 +0000 Subject: [PATCH 08/18] Add requests endpoint to core TRE API (#4352) --- CHANGELOG.md | 1 + api_app/_version.py | 2 +- api_app/api/routes/api.py | 5 +- api_app/api/routes/requests.py | 38 ++ api_app/db/repositories/airlock_requests.py | 49 ++- api_app/resources/strings.py | 2 + .../test_api/test_routes/test_requests.py | 42 +++ .../test_airlock_request_repository.py | 196 +++++++++- ui/app/package.json | 2 +- ui/app/src/components/root/LeftNav.tsx | 36 +- ui/app/src/components/root/RootLayout.tsx | 63 ++-- ui/app/src/components/shared/RequestsList.tsx | 339 ++++++++++++++++++ ui/app/src/models/apiEndpoints.ts | 1 + 13 files changed, 728 insertions(+), 48 deletions(-) create mode 100644 api_app/api/routes/requests.py create mode 100644 api_app/tests_ma/test_api/test_routes/test_requests.py create mode 100644 ui/app/src/components/shared/RequestsList.tsx diff --git a/CHANGELOG.md b/CHANGELOG.md index 879a3f47e5..d451bec8fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ ENHANCEMENTS: * Allow workspace App Service Plan SKU to be updated ([#4331](https://github.com/microsoft/AzureTRE/issues/4331)) +* Add core requests endpoint and UI to enable requests to be managed TRE wide. ([[#2510](https://github.com/microsoft/AzureTRE/issues/2510)]) * Remove public IP from TRE's firewall when forced tunneling is configured ([#4346](https://github.com/microsoft/AzureTRE/pull/4346)) * Upgrade AzureRM Terraform provider from `3.117.0` to `4.14.0`. ([[#4255](https://github.com/microsoft/AzureTRE/pull/4255/)]) * Subnet definitions are now inline in the `azurerm_virtual_network` resource, and NSG associations are set using `security_group` in each subnet block (no separate `azurerm_subnet_network_security_group_association` needed). ([[#4255](https://github.com/microsoft/AzureTRE/pull/4255/)]) diff --git a/api_app/_version.py b/api_app/_version.py index 8b8252f484..6a726d853b 100644 --- a/api_app/_version.py +++ b/api_app/_version.py @@ -1 +1 @@ -__version__ = "0.20.4" +__version__ = "0.21.0" diff --git a/api_app/api/routes/api.py b/api_app/api/routes/api.py index 0dd1a7d2c5..6dabc88275 100644 --- a/api_app/api/routes/api.py +++ b/api_app/api/routes/api.py @@ -8,7 +8,7 @@ from api.helpers import get_repository from db.repositories.workspaces import WorkspaceRepository from api.routes import health, ping, workspaces, workspace_templates, workspace_service_templates, user_resource_templates, \ - shared_services, shared_service_templates, migrations, costs, airlock, operations, metadata + shared_services, shared_service_templates, migrations, costs, airlock, operations, metadata, requests from core import config from resources import strings @@ -49,6 +49,7 @@ core_router.include_router(migrations.migrations_core_router, tags=["migrations"]) core_router.include_router(costs.costs_core_router, tags=["costs"]) core_router.include_router(costs.costs_workspace_router, tags=["costs"]) +core_router.include_router(requests.router, tags=["requests"]) core_swagger_router = APIRouter() swagger_disabled_router = APIRouter() @@ -112,7 +113,7 @@ async def get_disabled_swagger(): def get_scope(workspace) -> str: # Cope with the fact that scope id can have api:// at the front. 
- return f"api://{workspace.properties['scope_id'].replace('api://','')}/user_impersonation" + return f"api://{workspace.properties['scope_id'].replace('api://', '')}/user_impersonation" @workspace_swagger_router.get("/workspaces/{workspace_id}/openapi.json", include_in_schema=False, name="openapi_definitions") diff --git a/api_app/api/routes/requests.py b/api_app/api/routes/requests.py new file mode 100644 index 0000000000..1fffa6f356 --- /dev/null +++ b/api_app/api/routes/requests.py @@ -0,0 +1,38 @@ +from fastapi import APIRouter, Depends, HTTPException, status as status_code +from typing import List, Optional + +from api.helpers import get_repository +from resources import strings +from db.repositories.airlock_requests import AirlockRequestRepository +from models.domain.airlock_request import AirlockRequest, AirlockRequestStatus, AirlockRequestType +from services.authentication import get_current_tre_user_or_tre_admin + +router = APIRouter(dependencies=[Depends(get_current_tre_user_or_tre_admin)]) + + +@router.get("/requests", response_model=List[AirlockRequest], name=strings.API_LIST_REQUESTS) +async def get_requests( + user=Depends(get_current_tre_user_or_tre_admin), + airlock_request_repo: AirlockRequestRepository = Depends(get_repository(AirlockRequestRepository)), + airlock_manager: bool = False, + creator_user_id: Optional[str] = None, type: Optional[AirlockRequestType] = None, status: Optional[AirlockRequestStatus] = None, + order_by: Optional[str] = None, order_ascending: bool = True +) -> List[AirlockRequest]: + try: + if not airlock_manager: + requests = await airlock_request_repo.get_airlock_requests( + creator_user_id=creator_user_id or user.id, + type=type, + status=status, + order_by=order_by, + order_ascending=order_ascending, + ) + else: + requests = await airlock_request_repo.get_airlock_requests_for_airlock_manager(user) + + return requests + + except ValueError as ve: + raise HTTPException(status_code=status_code.HTTP_400_BAD_REQUEST, detail=str(ve)) + except Exception as e: + raise HTTPException(status_code=status_code.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) diff --git a/api_app/db/repositories/airlock_requests.py b/api_app/db/repositories/airlock_requests.py index f4a1926348..8bb8275aa6 100644 --- a/api_app/db/repositories/airlock_requests.py +++ b/api_app/db/repositories/airlock_requests.py @@ -7,6 +7,8 @@ from azure.cosmos.exceptions import CosmosResourceNotFoundError, CosmosAccessConditionFailedError from fastapi import HTTPException, status from pydantic import parse_obj_as +from db.repositories.workspaces import WorkspaceRepository +from services.authentication import get_access_service from models.domain.authentication import User from db.errors import EntityDoesNotExist from models.domain.airlock_request import AirlockFile, AirlockRequest, AirlockRequestStatus, \ @@ -107,27 +109,33 @@ def create_airlock_request_item(self, airlock_request_input: AirlockRequestInCre return airlock_request - async def get_airlock_requests(self, workspace_id: str, creator_user_id: Optional[str] = None, type: Optional[AirlockRequestType] = None, status: Optional[AirlockRequestStatus] = None, order_by: Optional[str] = None, order_ascending=True) -> List[AirlockRequest]: - query = self.airlock_requests_query() + f' WHERE c.workspaceId = "{workspace_id}"' + async def get_airlock_requests(self, workspace_id: Optional[str] = None, creator_user_id: Optional[str] = None, type: Optional[AirlockRequestType] = None, status: Optional[AirlockRequestStatus] = None, order_by: 
Optional[str] = None, order_ascending=True) -> List[AirlockRequest]: + query = self.airlock_requests_query() # optional filters + conditions = [] + parameters = [] + if workspace_id: + conditions.append('c.workspaceId=@workspace_id') + parameters.append({"name": "@workspace_id", "value": workspace_id}) if creator_user_id: - query += ' AND c.createdBy.id=@user_id' + conditions.append('c.createdBy.id=@user_id') + parameters.append({"name": "@user_id", "value": creator_user_id}) if status: - query += ' AND c.status=@status' + conditions.append('c.status=@status') + parameters.append({"name": "@status", "value": status}) if type: - query += ' AND c.type=@type' + conditions.append('c.type=@type') + parameters.append({"name": "@type", "value": type}) + + if conditions: + query += ' WHERE ' + ' AND '.join(conditions) # optional sorting if order_by: query += ' ORDER BY c.' + order_by query += ' ASC' if order_ascending else ' DESC' - parameters = [ - {"name": "@user_id", "value": creator_user_id}, - {"name": "@status", "value": status}, - {"name": "@type", "value": type}, - ] airlock_requests = await self.query(query=query, parameters=parameters) return parse_obj_as(List[AirlockRequest], airlock_requests) @@ -138,6 +146,27 @@ async def get_airlock_request_by_id(self, airlock_request_id: UUID4) -> AirlockR raise EntityDoesNotExist return parse_obj_as(AirlockRequest, airlock_requests) + async def get_airlock_requests_for_airlock_manager(self, user: User, type: Optional[AirlockRequestType] = None, status: Optional[AirlockRequestStatus] = None, order_by: Optional[str] = None, order_ascending=True) -> List[AirlockRequest]: + workspace_repo = await WorkspaceRepository.create() + access_service = get_access_service() + + workspaces = await workspace_repo.get_active_workspaces() + user_role_assignments = access_service.get_identity_role_assignments(user.id) + + valid_roles = {ra.role_id for ra in user_role_assignments} + + workspace_ids = [ + workspace.id + for workspace in workspaces + if workspace.properties["app_role_id_workspace_airlock_manager"] in valid_roles + ] + requests = [] + + for workspace_id in workspace_ids: + requests += await self.get_airlock_requests(workspace_id=workspace_id, type=type, status=status, order_by=order_by, order_ascending=order_ascending) + + return requests + async def update_airlock_request( self, original_request: AirlockRequest, diff --git a/api_app/resources/strings.py b/api_app/resources/strings.py index a9184cf289..896b385f85 100644 --- a/api_app/resources/strings.py +++ b/api_app/resources/strings.py @@ -34,6 +34,8 @@ API_UPDATE_USER_RESOURCE = "Update an existing user resource" API_INVOKE_ACTION_ON_USER_RESOURCE = "Invoke action on a user resource" +API_LIST_REQUESTS = "Get requests" + API_CREATE_AIRLOCK_REQUEST = "Create an airlock request" API_GET_AIRLOCK_REQUEST = "Get an airlock request" API_LIST_AIRLOCK_REQUESTS = "Get all airlock requests for a workspace" diff --git a/api_app/tests_ma/test_api/test_routes/test_requests.py b/api_app/tests_ma/test_api/test_routes/test_requests.py new file mode 100644 index 0000000000..4a10544ca1 --- /dev/null +++ b/api_app/tests_ma/test_api/test_routes/test_requests.py @@ -0,0 +1,42 @@ +import pytest +from fastapi import status +from mock import patch + +from resources import strings +from services.authentication import get_current_tre_user_or_tre_admin + + +pytestmark = pytest.mark.asyncio + + +class TestRequestsThatDontRequireAdminRigths: + @pytest.fixture(autouse=True, scope='class') + def log_in_with_non_admin_user(self, 
app, non_admin_user): + with patch('services.aad_authentication.AzureADAuthorization._get_user_from_token', return_value=non_admin_user()): + app.dependency_overrides[get_current_tre_user_or_tre_admin] = non_admin_user + yield + app.dependency_overrides = {} + + # [GET] /requests/ - get_requests + @patch("api.routes.airlock.AirlockRequestRepository.get_airlock_requests", return_value=[]) + async def test_get_all_requests_returns_200(self, _, app, client): + response = await client.get(app.url_path_for(strings.API_LIST_REQUESTS)) + assert response.status_code == status.HTTP_200_OK + + @patch("api.routes.airlock.AirlockRequestRepository.get_airlock_requests_for_airlock_manager") + async def test_get_airlock_manager_requests_returns_200(self, mock_get_airlock_requests_for_airlock_manager, app, client): + mock_get_airlock_requests_for_airlock_manager.return_value = [] + response = await client.get(app.url_path_for(strings.API_LIST_REQUESTS), params={"airlock_manager": True}) + + assert response.status_code == status.HTTP_200_OK + mock_get_airlock_requests_for_airlock_manager.assert_called_once() + + @patch("api.routes.airlock.AirlockRequestRepository.get_airlock_requests", side_effect=Exception("Internal Server Error")) + async def test_get_all_requests_returns_500(self, _, app, client): + response = await client.get(app.url_path_for(strings.API_LIST_REQUESTS)) + assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR + + @patch("api.routes.airlock.AirlockRequestRepository.get_airlock_requests_for_airlock_manager", side_effect=Exception("Internal Server Error")) + async def test_get_airlock_manager_requests_returns_500(self, _, app, client): + response = await client.get(app.url_path_for(strings.API_LIST_REQUESTS), params={"airlock_manager": True}) + assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR diff --git a/api_app/tests_ma/test_db/test_repositories/test_airlock_request_repository.py b/api_app/tests_ma/test_db/test_repositories/test_airlock_request_repository.py index 4c773db327..92ae8b7ae7 100644 --- a/api_app/tests_ma/test_db/test_repositories/test_airlock_request_repository.py +++ b/api_app/tests_ma/test_db/test_repositories/test_airlock_request_repository.py @@ -3,6 +3,8 @@ from mock import patch import pytest import pytest_asyncio +from models.domain.authentication import RoleAssignment, User +from models.domain.workspace import Workspace from tests_ma.test_api.conftest import create_test_user from models.schemas.airlock_request import AirlockRequestInCreate from models.domain.airlock_request import AirlockRequest, AirlockRequestStatus, AirlockRequestType @@ -63,6 +65,18 @@ def verify_dictionary_contains_all_enum_values(): raise Exception(f"Status '{status}' was not added to the ALLOWED_STATUS_CHANGES dictionary") +def sample_workspace(workspace_id=WORKSPACE_ID, workspace_properties: dict = {}) -> Workspace: + workspace = Workspace( + id=workspace_id, + templateName="tre-workspace-base", + templateVersion="0.1.0", + etag="", + properties=workspace_properties, + resourcePath=f'/workspaces/{workspace_id}' + ) + return workspace + + def airlock_request_mock(status=AirlockRequestStatus.Draft): airlock_request = AirlockRequest( id=AIRLOCK_REQUEST_ID, @@ -143,12 +157,186 @@ async def test_update_airlock_request_should_retry_update_when_etag_is_not_up_to async def test_get_airlock_requests_queries_db(airlock_request_repo): airlock_request_repo.container.query_items = MagicMock() - expected_query = airlock_request_repo.airlock_requests_query() + f' WHERE 
c.workspaceId = "{WORKSPACE_ID}"' + expected_query = airlock_request_repo.airlock_requests_query() + ' WHERE c.workspaceId=@workspace_id' expected_parameters = [ - {"name": "@user_id", "value": None}, - {"name": "@status", "value": None}, - {"name": "@type", "value": None}, + {"name": "@workspace_id", "value": WORKSPACE_ID}, ] await airlock_request_repo.get_airlock_requests(WORKSPACE_ID) airlock_request_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=expected_parameters) + + +async def test_get_airlock_requests_with_user_id(airlock_request_repo): + airlock_request_repo.container.query_items = MagicMock() + user_id = "test_user_id" + expected_query = airlock_request_repo.airlock_requests_query() + ' WHERE c.createdBy.id=@user_id' + expected_parameters = [ + {"name": "@user_id", "value": user_id}, + ] + + await airlock_request_repo.get_airlock_requests(creator_user_id=user_id) + airlock_request_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=expected_parameters) + + +async def test_get_airlock_requests_with_status(airlock_request_repo): + airlock_request_repo.container.query_items = MagicMock() + status = AirlockRequestStatus.Submitted + expected_query = airlock_request_repo.airlock_requests_query() + ' WHERE c.status=@status' + expected_parameters = [ + {"name": "@status", "value": status} + ] + + await airlock_request_repo.get_airlock_requests(status=status) + airlock_request_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=expected_parameters) + + +async def test_get_airlock_requests_with_type(airlock_request_repo): + airlock_request_repo.container.query_items = MagicMock() + request_type = AirlockRequestType.Import + expected_query = airlock_request_repo.airlock_requests_query() + ' WHERE c.type=@type' + expected_parameters = [ + {"name": "@type", "value": request_type}, + ] + + await airlock_request_repo.get_airlock_requests(type=request_type) + airlock_request_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=expected_parameters) + + +async def test_get_airlock_requests_with_multiple_filters(airlock_request_repo): + airlock_request_repo.container.query_items = MagicMock() + user_id = "test_user_id" + status = AirlockRequestStatus.Submitted + request_type = AirlockRequestType.Import + expected_query = airlock_request_repo.airlock_requests_query() + ' WHERE c.createdBy.id=@user_id AND c.status=@status AND c.type=@type' + expected_parameters = [ + {"name": "@user_id", "value": user_id}, + {"name": "@status", "value": status}, + {"name": "@type", "value": request_type}, + ] + + await airlock_request_repo.get_airlock_requests(creator_user_id=user_id, status=status, type=request_type) + airlock_request_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=expected_parameters) + + +@pytest.mark.asyncio +@patch.object(AirlockRequestRepository, 'get_airlock_requests', new_callable=AsyncMock) +@patch('db.repositories.airlock_requests.get_access_service', autospec=True) +@patch('db.repositories.airlock_requests.WorkspaceRepository', autospec=True) +async def test_get_airlock_requests_for_airlock_manager_no_roles( + mock_workspace_repo, + mock_access_service, + mock_get_requests, + airlock_request_repo +): + # Mock no user roles + mock_access_service.return_value.get_identity_role_assignments.return_value = [] + + # Mock active workspaces + mock_workspace_instance = MagicMock() + mock_workspace_instance.get_active_workspaces = 
AsyncMock(return_value=[]) + mock_workspace_repo.create = AsyncMock(return_value=mock_workspace_instance) + + # Call function + user = User(id="user1", name="TestUser") + result = await airlock_request_repo.get_airlock_requests_for_airlock_manager(user) + + # validate + assert result == [] + mock_get_requests.assert_not_called() + + +@pytest.mark.asyncio +@patch.object(AirlockRequestRepository, 'get_airlock_requests', new_callable=AsyncMock) +@patch('db.repositories.airlock_requests.get_access_service', autospec=True) +@patch('db.repositories.airlock_requests.WorkspaceRepository', autospec=True) +async def test_get_airlock_requests_for_airlock_manager_single_workspace( + mock_workspace_repo, + mock_access_service, + mock_get_requests, + airlock_request_repo +): + # Setup workspace and manager role + workspace = sample_workspace(workspace_properties={"app_role_id_workspace_airlock_manager": "manager-role-1"}) + mock_workspace_instance = MagicMock() + mock_workspace_instance.get_active_workspaces = AsyncMock(return_value=[workspace]) + mock_workspace_repo.create = AsyncMock(return_value=mock_workspace_instance) + + # Setup user roles + role_assignment = RoleAssignment(resource_id="resource_id", role_id="manager-role-1") + mock_access_service.return_value.get_identity_role_assignments.return_value = [role_assignment] + + # Setup corresponding requests from that workspace + request_mock = AirlockRequest(id="request-1", workspaceId=WORKSPACE_ID, type=AirlockRequestType.Import, reviews=[]) + mock_get_requests.return_value = [request_mock] + + user = User(id="user1", name="TestUser") + result = await airlock_request_repo.get_airlock_requests_for_airlock_manager(user) + + assert len(result) == 1 + assert result[0].id == "request-1" + mock_get_requests.assert_called_once_with(workspace_id=WORKSPACE_ID, type=None, status=None, order_by=None, order_ascending=True) + + +@pytest.mark.asyncio +@patch.object(AirlockRequestRepository, 'get_airlock_requests', new_callable=AsyncMock) +@patch('db.repositories.airlock_requests.get_access_service', autospec=True) +@patch('db.repositories.airlock_requests.WorkspaceRepository', autospec=True) +async def test_get_airlock_requests_for_airlock_manager_multiple_workspaces( + mock_workspace_repo, + mock_access_service, + mock_get_requests, + airlock_request_repo +): + # Setup multiple workspaces + workspace1 = sample_workspace(workspace_properties={"app_role_id_workspace_airlock_manager": "manager-role-1"}) + workspace2 = sample_workspace(workspace_properties={"app_role_id_workspace_airlock_manager": "manager-role-2"}) + mock_workspace_instance = MagicMock() + mock_workspace_instance.get_active_workspaces = AsyncMock(return_value=[workspace1, workspace2]) + mock_workspace_repo.create = AsyncMock(return_value=mock_workspace_instance) + + # Setup user roles + role_assignment_1 = RoleAssignment(resource_id="resource_id", role_id="manager-role-1") + role_assignment_2 = RoleAssignment(resource_id="resource_id", role_id="manager-role-2") + mock_access_service.return_value.get_identity_role_assignments.return_value = [role_assignment_1, role_assignment_2] + + # Setup requests for each workspace + first_ws_requests = [AirlockRequest(id="request-1", workspaceId="workspace-1", type=AirlockRequestType.Import, reviews=[])] + second_ws_requests = [AirlockRequest(id="request-2", workspaceId="workspace-2", type=AirlockRequestType.Import, reviews=[])] + mock_get_requests.side_effect = [first_ws_requests, second_ws_requests] + + user = User(id="user1", name="TestUser") + result = 
await airlock_request_repo.get_airlock_requests_for_airlock_manager(user) + + # combined requests from both + assert len(result) == 2 + assert result[0].id == "request-1" + assert result[1].id == "request-2" + assert mock_get_requests.call_count == 2 + + +@pytest.mark.asyncio +@patch.object(AirlockRequestRepository, 'get_airlock_requests', new_callable=AsyncMock) +@patch('db.repositories.airlock_requests.get_access_service', autospec=True) +@patch('db.repositories.airlock_requests.WorkspaceRepository', autospec=True) +async def test_get_airlock_requests_for_airlock_manager_active_workspaces_but_no_manager_role( + mock_workspace_repo, + mock_access_service, + mock_get_requests, + airlock_request_repo +): + # Setup multiple workspaces, but user doesn't have manager roles + workspace1 = sample_workspace(workspace_properties={"app_role_id_workspace_airlock_manager": "manager-role-1"}) + workspace2 = sample_workspace(workspace_properties={"app_role_id_workspace_airlock_manager": "manager-role-2"}) + mock_workspace_instance = MagicMock() + mock_workspace_instance.get_active_workspaces = AsyncMock(return_value=[workspace1, workspace2]) + mock_workspace_repo.create = AsyncMock(return_value=mock_workspace_instance) + + # No matching roles for these workspaces + mock_access_service.return_value.get_identity_role_assignments.return_value = [ + RoleAssignment(resource_id="resource_id", role_id="some-other-role") + ] + + user = User(id="user1", name="TestUser") + result = await airlock_request_repo.get_airlock_requests_for_airlock_manager(user) + assert result == [] + mock_get_requests.assert_not_called() diff --git a/ui/app/package.json b/ui/app/package.json index b1a9a66992..b3a14712be 100644 --- a/ui/app/package.json +++ b/ui/app/package.json @@ -1,6 +1,6 @@ { "name": "tre-ui", - "version": "0.6.3", + "version": "0.7.0", "private": true, "dependencies": { "@azure/msal-browser": "^2.35.0", diff --git a/ui/app/src/components/root/LeftNav.tsx b/ui/app/src/components/root/LeftNav.tsx index 568d326151..706cc8c8b1 100644 --- a/ui/app/src/components/root/LeftNav.tsx +++ b/ui/app/src/components/root/LeftNav.tsx @@ -1,13 +1,16 @@ import React, { useContext } from 'react'; import { Nav, INavLinkGroup } from '@fluentui/react/lib/Nav'; -import { useNavigate } from 'react-router-dom'; +import { useLocation, useNavigate } from 'react-router-dom'; import { AppRolesContext } from '../../contexts/AppRolesContext'; import { RoleName } from '../../models/roleNames'; export const LeftNav: React.FunctionComponent = () => { const navigate = useNavigate(); + const location = useLocation(); const appRolesCtx = useContext(AppRolesContext); + const isRequestsRoute = location.pathname.startsWith('/requests'); // ← True if URL starts with /requests + const navLinkGroups: INavLinkGroup[] = [ { links: [ @@ -32,9 +35,38 @@ export const LeftNav: React.FunctionComponent = () => { }); } + const requestsLinkArray: { name: string; url: string; key: string; icon: string }[] = []; + + requestsLinkArray.push( + { + name: 'Airlock', + url: '/requests/airlock', + key: 'airlock', + icon: 'Lock', + + }); + + // add Requests link + navLinkGroups[0].links.push( + { + name: 'Requests', + url: '/requests', + key: 'requests', + icon: '', + links: requestsLinkArray, + isExpanded: isRequestsRoute + }); + return (