diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml index 9a48ccaf3c45..d6a5cfbddfd4 100644 --- a/.github/labeler-issue-triage.yml +++ b/.github/labeler-issue-triage.yml @@ -182,6 +182,9 @@ service/iot-central: service/iot-hub: - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azurerm_iothub((.|\n)*)###' +service/iot-operations: + - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azurerm_iotoperations_((.|\n)*)###' + service/key-vault: - '### (|New or )Affected Resource\(s\)\/Data Source\(s\)((.|\n)*)azurerm_(key_vault\W+|key_vault_access_policy\W+|key_vault_certificate\W+|key_vault_certificate_contacts\W+|key_vault_certificate_data\W+|key_vault_certificate_issuer\W+|key_vault_certificates\W+|key_vault_encrypted_value\W+|key_vault_key\W+|key_vault_managed_storage_account\W+|key_vault_managed_storage_account_sas_token_definition\W+|key_vault_secret\W+|key_vault_secrets\W+)((.|\n)*)###' diff --git a/.github/labeler-pull-request-triage.yml b/.github/labeler-pull-request-triage.yml index 88310751524b..49c4e332f54a 100644 --- a/.github/labeler-pull-request-triage.yml +++ b/.github/labeler-pull-request-triage.yml @@ -304,6 +304,11 @@ service/iot-hub: - any-glob-to-any-file: - internal/services/iothub/**/* +service/iot-operations: +- changed-files: + - any-glob-to-any-file: + - internal/services/iotoperations/**/* + service/key-vault: - changed-files: - any-glob-to-any-file: diff --git a/.teamcity/components/generated/services.kt b/.teamcity/components/generated/services.kt index e8724ba28081..6bb5b11cdc40 100644 --- a/.teamcity/components/generated/services.kt +++ b/.teamcity/components/generated/services.kt @@ -65,6 +65,7 @@ var services = mapOf( "hybridcompute" to "Hybrid Compute", "iotcentral" to "IoT Central", "iothub" to "IoT Hub", + "iotoperations" to "IoT Operations", "keyvault" to "KeyVault", "kusto" to "Kusto", "legacy" to "Legacy", diff --git a/examples/iot/iotoperations_broker/README.md 
b/examples/iot/iotoperations_broker/README.md new file mode 100644 index 000000000000..90b03c3c393b --- /dev/null +++ b/examples/iot/iotoperations_broker/README.md @@ -0,0 +1,75 @@ +# IoT Operations Broker + +This example shows how to create an Azure IoT Operations broker using Terraform. + +## Prerequisites + +Before running this example, you need: + +1. **Azure CLI** installed and authenticated +2. **Terraform** 1.6 or later +3. **Existing Resource Group** in Azure +4. **Existing IoT Operations Instance** +5. **Arc-enabled Kubernetes cluster** with a Custom Location + +## Usage + +### Step 1: Set Variables + +Create a `terraform.tfvars` file: + +```hcl +# Prefix for resource naming +prefix = "mycompany" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Existing IoT Operations Instance +instance_name = "existing-iotoperations-instance" + +# Custom Location (Arc-enabled Kubernetes cluster) +custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ExtendedLocation/customLocations/example-location" +``` + +### Step 2: Deploy + +```bash +terraform init +terraform plan +terraform apply +``` + +## Variables + +| Name | Description | Type | Required | +|------|-------------|------|----------| +| `prefix` | Prefix for resource naming | `string` | yes | +| `resource_group_name` | Name of existing resource group | `string` | yes | +| `instance_name` | Name of existing IoT Operations instance | `string` | yes | +| `custom_location_id` | ARM ID of Custom Location | `string` | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| `iotoperations_broker_id` | ARM resource ID of the IoT Operations broker | + +## Architecture + +This example creates: + +- **IoT Operations Broker** (named `{prefix}-broker`) within an existing IoT Operations instance + +The broker requires: +- An existing Resource Group +- An existing IoT Operations Instance +- An Arc-enabled 
Kubernetes cluster (Custom Location) + +## Cleanup + +```bash +terraform destroy +``` + +Note: This will only destroy the broker. The IoT Operations instance, resource group, and Custom Location will remain. \ No newline at end of file diff --git a/examples/iot/iotoperations_broker/main.tf b/examples/iot/iotoperations_broker/main.tf new file mode 100644 index 000000000000..0d37647b8318 --- /dev/null +++ b/examples/iot/iotoperations_broker/main.tf @@ -0,0 +1,120 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 1.6" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} +provider "azurerm" { + features {} +} + +# Use existing resource group +data "azurerm_resource_group" "example" { + name = var.resource_group_name +} + +# IoT Operations broker +resource "azurerm_iotoperations_broker" "example" { + name = var.broker_name + resource_group_name = data.azurerm_resource_group.example.name + instance_name = var.instance_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + properties { + memory_profile = "Medium" + + cardinality { + backend_chain { + partitions = 2 + redundancy_factor = 1 + workers = 1 + } + + frontend { + replicas = 2 + workers = 1 + } + } + + advanced { + encrypt_internal_traffic = "Enabled" + + clients { + max_session_expiry_seconds = 3600 + max_message_expiry_seconds = 3600 + max_packet_size_bytes = 1048576 + max_receive_maximum = 100 + max_keep_alive_seconds = 3600 + + subscriber_queue_limit { + length = 1000 + strategy = "DropOldest" + } + } + + internal_certs { + duration = "8760h" + renew_before = "720h" + + private_key { + algorithm = "RSA" + rotation_policy = "Always" + } + } + } + + diagnostics { + logs { + level = "info" + } + + metrics { + prometheus_port = 9090 + } + + self_check { + mode = "Enabled" + interval_seconds = 30 + timeout_seconds = 15 + } + + traces { + mode = "Enabled" + cache_size_megabytes = 16 + 
span_channel_capacity = 1000 + + self_tracing { + mode = "Enabled" + interval_seconds = 30 + } + } + } + + disk_backed_message_buffer { + max_size = "1Gi" + + ephemeral_volume_claim_spec { + access_modes = ["ReadWriteOnce"] + + resources { + requests = { + "storage" = "1Gi" + } + } + } + } + + generate_resource_limits { + cpu = "Enabled" + } + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker/outputs.tf b/examples/iot/iotoperations_broker/outputs.tf new file mode 100644 index 000000000000..da22c5200078 --- /dev/null +++ b/examples/iot/iotoperations_broker/outputs.tf @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "iotoperations_broker_id" { + description = "The ARM resource ID of the IoT Operations broker" + value = azurerm_iotoperations_broker.example.id +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker/terraform.tfvars.example b/examples/iot/iotoperations_broker/terraform.tfvars.example new file mode 100644 index 000000000000..f3b694857f1b --- /dev/null +++ b/examples/iot/iotoperations_broker/terraform.tfvars.example @@ -0,0 +1,17 @@ +# Example terraform.tfvars file +# Copy this to terraform.tfvars and update with your values + +# Prefix for resource naming +prefix = "example" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Existing IoT Operations Instance +instance_name = "existing-iotoperations-instance" + +# IoT Operations Broker Name +broker_name = "example-broker" + +# Custom Location (Arc-enabled Kubernetes cluster) +custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ExtendedLocation/customLocations/example-location" \ No newline at end of file diff --git a/examples/iot/iotoperations_broker/variables.tf b/examples/iot/iotoperations_broker/variables.tf new file mode 100644 index 000000000000..66800f6e6390 --- /dev/null +++ 
b/examples/iot/iotoperations_broker/variables.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "prefix" { + description = "The prefix used for all resources in this example" + type = string +} + +variable "resource_group_name" { + description = "The name of an existing resource group where resources will be created" + type = string +} + +variable "instance_name" { + description = "The name of the existing IoT Operations instance" + type = string +} + +variable "custom_location_id" { + description = "The ARM resource ID of the Custom Location (Arc-enabled Kubernetes cluster)" + type = string +} + +variable "broker_name" { + description = "The name of the IoT Operations broker" + type = string +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authentication/README.md b/examples/iot/iotoperations_broker_authentication/README.md new file mode 100644 index 000000000000..7a3c307a20e0 --- /dev/null +++ b/examples/iot/iotoperations_broker_authentication/README.md @@ -0,0 +1,85 @@ +# IoT Operations Broker Authentication + +This example shows how to create an Azure IoT Operations broker authentication using Terraform. + +## Prerequisites + +Before running this example, you need: + +1. **Azure CLI** installed and authenticated +2. **Terraform** 1.6 or later +3. **Existing Resource Group** in Azure +4. **Existing IoT Operations Instance** +5. 
**Existing IoT Operations Broker** + +## Usage + +### Step 1: Set Variables + +Create a `terraform.tfvars` file: + +```hcl +# Prefix for resource naming +prefix = "mycompany" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Existing IoT Operations Instance +instance_name = "existing-iotoperations-instance" + +# Existing IoT Operations Broker +broker_name = "existing-iotoperations-broker" + +# Authentication audience (optional) +audience = "aio-internal" +``` + +### Step 2: Deploy + +```bash +terraform init +terraform plan +terraform apply +``` + +## Variables + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|----------| +| `prefix` | Prefix for resource naming | `string` | n/a | yes | +| `resource_group_name` | Name of existing resource group | `string` | n/a | yes | +| `instance_name` | Name of existing IoT Operations instance | `string` | n/a | yes | +| `broker_name` | Name of existing IoT Operations broker | `string` | n/a | yes | +| `audience` | Authentication audience | `string` | `"aio-internal"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| `iotoperations_broker_authentication_id` | ARM resource ID of the IoT Operations broker authentication | + +## Architecture + +This example creates: + +- **IoT Operations Broker Authentication** (named `{prefix}-broker-auth`) within an existing IoT Operations broker + +The broker authentication requires: +- An existing Resource Group +- An existing IoT Operations Instance +- An existing IoT Operations Broker + +## Authentication Methods + +The example configures: +- **ServiceAccountToken** authentication method +- **Custom settings** with audience configuration + +## Cleanup + +```bash +terraform destroy +``` + +Note: This will only destroy the broker authentication. The broker, IoT Operations instance, and resource group will remain. 
\ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authentication/main.tf b/examples/iot/iotoperations_broker_authentication/main.tf new file mode 100644 index 000000000000..56519c33120b --- /dev/null +++ b/examples/iot/iotoperations_broker_authentication/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 1.6" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} + +provider "azurerm" { + features {} +} + +# Use existing resource group +data "azurerm_resource_group" "example" { + name = var.resource_group_name +} + +# IoT Operations broker authentication +resource "azurerm_iotoperations_broker_authentication" "example" { + name = "${var.prefix}-broker-auth" + resource_group_name = data.azurerm_resource_group.example.name + instance_name = var.instance_name + broker_name = var.broker_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + authentication_methods { + method = "Custom" + + custom_settings { + endpoint = "https://www.example.com" + ca_cert_config_map = "pdecudefqyolvncbus" + + headers = { + "key8518" = "bwityjy" + } + + auth { + x509 { + secret_ref = "secret-name" + } + } + } + + service_account_token_settings { + audiences = ["jqyhyqatuydg"] + } + + x509_settings { + trusted_client_ca_cert = "vlctsqddl" + + authorization_attributes { + name = "key3384" + subject = "jpgwctfeixitptfgfnqhua" + attributes = { + key186 = "ucpajramsz" + } + } + } + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authentication/outputs.tf b/examples/iot/iotoperations_broker_authentication/outputs.tf new file mode 100644 index 000000000000..1dd615062439 --- /dev/null +++ b/examples/iot/iotoperations_broker_authentication/outputs.tf @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +output "iotoperations_broker_authentication_id" { + description = "The ARM resource ID of the IoT Operations broker authentication" + value = azurerm_iotoperations_broker_authentication.example.id +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authentication/terraform.tfvars.example b/examples/iot/iotoperations_broker_authentication/terraform.tfvars.example new file mode 100644 index 000000000000..a6b4332df4fd --- /dev/null +++ b/examples/iot/iotoperations_broker_authentication/terraform.tfvars.example @@ -0,0 +1,20 @@ +# Example terraform.tfvars file +# Copy this to terraform.tfvars and update with your values + +# Prefix for resource naming +prefix = "example" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Existing IoT Operations Instance +instance_name = "existing-iotoperations-instance" + +# Existing IoT Operations Broker +broker_name = "existing-iotoperations-broker" + +# Authentication audience +audience = "aio-internal" + +# Custom Location (Arc-enabled Kubernetes cluster) +custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ExtendedLocation/customLocations/example-location" \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authentication/variables.tf b/examples/iot/iotoperations_broker_authentication/variables.tf new file mode 100644 index 000000000000..547bd1ebe143 --- /dev/null +++ b/examples/iot/iotoperations_broker_authentication/variables.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +variable "prefix" { + description = "The prefix used for all resources in this example" + type = string +} + +variable "resource_group_name" { + description = "The name of an existing resource group where resources will be created" + type = string +} + +variable "instance_name" { + description = "The name of the existing IoT Operations instance" + type = string +} + +variable "broker_name" { + description = "The name of the existing IoT Operations broker" + type = string +} + +variable "audience" { + description = "The audience for the authentication" + type = string + default = "aio-internal" +} + +variable "custom_location_id" { + description = "The ID of the custom location to use for extended location." + type = string +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authorization/README.md b/examples/iot/iotoperations_broker_authorization/README.md new file mode 100644 index 000000000000..a71d470bae1c --- /dev/null +++ b/examples/iot/iotoperations_broker_authorization/README.md @@ -0,0 +1,95 @@ +# IoT Operations Broker Authorization + +This example shows how to create an Azure IoT Operations broker authorization using Terraform. + +## Prerequisites + +Before running this example, you need: + +1. **Azure CLI** installed and authenticated +2. **Terraform** 1.6 or later +3. **Existing Resource Group** in Azure +4. **Existing IoT Operations Instance** +5. 
**Existing IoT Operations Broker** + +## Usage + +### Step 1: Set Variables + +Create a `terraform.tfvars` file: + +```hcl +# Prefix for resource naming +prefix = "mycompany" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Existing IoT Operations Instance +instance_name = "existing-iotoperations-instance" + +# Existing IoT Operations Broker +broker_name = "existing-iotoperations-broker" +``` + +### Step 2: Deploy + +```bash +terraform init +terraform plan +terraform apply +``` + +## Variables + +| Name | Description | Type | Required | +|------|-------------|------|----------| +| `prefix` | Prefix for resource naming | `string` | yes | +| `resource_group_name` | Name of existing resource group | `string` | yes | +| `instance_name` | Name of existing IoT Operations instance | `string` | yes | +| `broker_name` | Name of existing IoT Operations broker | `string` | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| `iotoperations_broker_authorization_id` | ARM resource ID of the IoT Operations broker authorization | + +## Architecture + +This example creates: + +- **IoT Operations Broker Authorization** (named `{prefix}-broker-authz`) within an existing IoT Operations broker + +The broker authorization requires: +- An existing Resource Group +- An existing IoT Operations Instance +- An existing IoT Operations Broker + +## Authorization Policies + +The example configures: +- **Cache**: Enabled for performance +- **Rules**: Authorization rules for broker access + - **Broker Resources**: `["*"]` (all resources) + - **Method**: `Connect` (connection method) + - **Clients**: `["*"]` (all clients) + - **State Store**: Key-value pairs for additional authorization context + +## Resource Hierarchy + +``` +Resource Group +└── IoT Operations Instance + └── IoT Operations Broker + ├── IoT Operations Broker Authentication + └── IoT Operations Broker Authorization ← This resource +``` + +## Cleanup + +```bash +terraform 
destroy +``` + +Note: This will only destroy the broker authorization. The broker, IoT Operations instance, and resource group will remain. \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authorization/main.tf b/examples/iot/iotoperations_broker_authorization/main.tf new file mode 100644 index 000000000000..71e9f87e5e41 --- /dev/null +++ b/examples/iot/iotoperations_broker_authorization/main.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 1.6" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} + +provider "azurerm" { + features {} +} + +# Use existing resource group +data "azurerm_resource_group" "example" { + name = var.resource_group_name +} + +# IoT Operations broker authorization +resource "azurerm_iotoperations_broker_authorization" "example" { + name = "${var.prefix}-broker-authz" + resource_group_name = data.azurerm_resource_group.example.name + instance_name = var.instance_name + broker_name = var.broker_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + authorization_policies { + cache = "Enabled" + + rules { + principals { + clients = ["my-client-id"] + attributes = [ + { + "floor" = "floor1" + "site" = "site1" + } + ] + } + + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Subscribe" + topics = ["topic", "topic/with/wildcard/#"] + } + + state_store_resources { + method = "ReadWrite" + key_type = "Pattern" + keys = ["*"] + } + } + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authorization/outputs.tf b/examples/iot/iotoperations_broker_authorization/outputs.tf new file mode 100644 index 000000000000..a73b82317aa2 --- /dev/null +++ b/examples/iot/iotoperations_broker_authorization/outputs.tf @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +output "iotoperations_broker_authorization_id" { + description = "The ARM resource ID of the IoT Operations broker authorization" + value = azurerm_iotoperations_broker_authorization.example.id +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authorization/terraform.tfvars.example b/examples/iot/iotoperations_broker_authorization/terraform.tfvars.example new file mode 100644 index 000000000000..341424ef44ab --- /dev/null +++ b/examples/iot/iotoperations_broker_authorization/terraform.tfvars.example @@ -0,0 +1,17 @@ +# Example terraform.tfvars file +# Copy this to terraform.tfvars and update with your values + +# Prefix for resource naming +prefix = "example" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Existing IoT Operations Instance +instance_name = "existing-iotoperations-instance" + +# Existing IoT Operations Broker +broker_name = "existing-iotoperations-broker" + +# Custom Location (Arc-enabled Kubernetes cluster) +custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ExtendedLocation/customLocations/example-location" \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_authorization/variables.tf b/examples/iot/iotoperations_broker_authorization/variables.tf new file mode 100644 index 000000000000..d470b2f622ba --- /dev/null +++ b/examples/iot/iotoperations_broker_authorization/variables.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "prefix" { + description = "The prefix used for all resources in this example" + type = string +} + +variable "resource_group_name" { + description = "The name of an existing resource group where resources will be created" + type = string +} + +variable "instance_name" { + description = "The name of the existing IoT Operations instance" + type = string +} + +variable "broker_name" { + description = "The name of the existing IoT Operations broker" + type = string +} + +variable "custom_location_id" { + description = "The ID of the custom location to use for extended location." 
+ type = string +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_listener/README.md b/examples/iot/iotoperations_broker_listener/README.md new file mode 100644 index 000000000000..ea8833ec19f5 --- /dev/null +++ b/examples/iot/iotoperations_broker_listener/README.md @@ -0,0 +1,123 @@ +# IoT Operations Broker Listener Example + +This example demonstrates how to create an Azure IoT Operations Broker Listener using Terraform. + +## Overview + +This Terraform configuration creates: +- A Resource Group +- An IoT Operations Instance +- An IoT Operations Broker +- An IoT Operations Broker Listener with configurable options + +## Prerequisites + +- Azure subscription +- Terraform installed +- Azure CLI installed and authenticated +- An Azure Kubernetes Service (AKS) cluster for IoT Operations deployment + +## Usage + +1. Clone this repository and navigate to this example directory: + ```bash + cd examples/iot/iotoperations_broker_listener + ``` + +2. Copy the example variables file: + ```bash + cp terraform.tfvars.example terraform.tfvars + ``` + +3. Edit `terraform.tfvars` with your desired values: + ```hcl + resource_group_name = "my-iotops-rg" + location = "East US 2" + instance_name = "my-iotops-instance" + broker_name = "my-broker" + listener_name = "mqtt-listener" + listener_port = 1883 + service_type = "LoadBalancer" + ``` + +4. Initialize Terraform: + ```bash + terraform init + ``` + +5. Plan the deployment: + ```bash + terraform plan + ``` + +6. 
Apply the configuration: + ```bash + terraform apply + ``` + +## Configuration + +### Broker Listener + +The broker listener is configured with the following key parameters: + +- **Port**: The port number on which the listener will accept connections (default: 1883 for MQTT) +- **Service Type**: The Kubernetes service type (ClusterIP, NodePort, LoadBalancer) +- **Service Name**: The name of the Kubernetes service + +### TLS Configuration + +TLS can be enabled for secure communication: + +```hcl +enable_tls = true +tls_mode = "Automatic" # or "Manual" +``` + +When TLS is enabled, the listener will use cert-manager to automatically provision and manage certificates. + +### Authentication and Authorization + +The listener can reference authentication and authorization policies: + +```hcl +authentication_ref_name = "my-auth-policy" +authorization_ref_name = "my-authz-policy" +``` + +These references link to separately created authentication and authorization resources. + +## Resource Hierarchy + +The IoT Operations resources follow this hierarchy: +1. **Instance** - Top-level IoT Operations instance +2. **Broker** - MQTT broker within the instance +3. **Listener** - Network endpoint for the broker + +Each listener belongs to a specific broker, which belongs to a specific instance. 
+ +## Outputs + +This configuration provides the following outputs: +- Resource Group ID +- IoT Operations Instance ID +- Broker ID +- Broker Listener ID and configuration details + +## Clean Up + +To destroy the resources: +```bash +terraform destroy +``` + +## Notes + +- The listener port should not conflict with other services in your cluster +- When using LoadBalancer service type, ensure your cluster supports external load balancers +- TLS configuration requires cert-manager to be installed in your cluster +- Authentication and authorization policies must be created separately if referenced + +## More Information + +For more details about Azure IoT Operations, visit the [official documentation](https://docs.microsoft.com/azure/iot-operations/). \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_listener/main.tf b/examples/iot/iotoperations_broker_listener/main.tf new file mode 100644 index 000000000000..1dbd0873c39a --- /dev/null +++ b/examples/iot/iotoperations_broker_listener/main.tf @@ -0,0 +1,163 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} + +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "example" { + name = var.resource_group_name + location = var.location +} + + +resource "azurerm_iotoperations_broker" "example" { + name = var.broker_name + resource_group_name = azurerm_resource_group.example.name + instance_name = var.instance_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + # Add other supported properties/blocks here as needed +} + +# Simple Broker Listener - Basic port configuration only +resource "azurerm_iotoperations_broker_listener" "simple" { + + name = var.simple_listener_name + resource_group_name = azurerm_resource_group.example.name + instance_name = var.instance_name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location_name = var.custom_location_id + + service_name = 
var.simple_service_name + service_type = "ClusterIp" + + ports { + port = var.simple_listener_port + } +} + +# Complex Broker Listener - With TLS, authentication, and authorization +resource "azurerm_iotoperations_broker_listener" "complex" { + + name = var.complex_listener_name + resource_group_name = azurerm_resource_group.example.name + instance_name = var.instance_name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location_name = var.custom_location_id + + service_name = var.complex_service_name + service_type = var.complex_service_type + + ports { + port = var.complex_listener_port + node_port = var.complex_node_port + protocol = "MQTT" + authentication_ref = "example-auth-ref" + authorization_ref = "example-authz-ref" + + tls { + mode = var.complex_tls_mode + + cert_manager_certificate_spec { + duration = var.complex_tls_cert_duration + secret_name = var.complex_tls_cert_secret_name + renew_before = var.complex_tls_cert_renew_before + + issuer_ref { + name = var.complex_tls_issuer_name + kind = var.complex_tls_issuer_kind + group = var.complex_tls_issuer_group + } + + private_key { + algorithm = "Rsa2048" + rotation_policy = var.complex_tls_private_key_rotation_policy + } + + san { + dns = var.complex_tls_san_dns + ip = var.complex_tls_san_ip + } + } + } + } +} + +# Full/Advanced Broker Listener - Multiple ports with all configuration options +resource "azurerm_iotoperations_broker_listener" "full" { + count = var.enable_full_listener ? 
1 : 0 + + + name = var.full_listener_name + resource_group_name = azurerm_resource_group.example.name + instance_name = var.instance_name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location_name = var.custom_location_id + + service_name = var.full_service_name + service_type = var.full_service_type + + # MQTT port with TLS + ports { + port = 8883 + node_port = 30883 + protocol = "MQTT" + authentication_ref = "example-auth-ref" + authorization_ref = "example-authz-ref" + + tls { + mode = "Automatic" + + cert_manager_certificate_spec { + duration = "8760h" + secret_name = "mqtt-tls-secret" + renew_before = "720h" + + issuer_ref { + name = "cluster-issuer" + kind = "ClusterIssuer" + group = "cert-manager.io" + } + + private_key { + algorithm = "Rsa2048" + rotation_policy = "Always" + } + + san { + dns = ["mqtt.example.com", "broker.local"] + ip = ["10.0.0.1"] + } + } + } + } + + # WebSocket port + ports { + port = 8080 + node_port = 30080 + protocol = "WebSockets" + authentication_ref = var.full_authentication_ref + authorization_ref = var.full_authorization_ref + } + + # HTTP port for management + ports { + port = 8081 + node_port = 30081 + protocol = "Http" + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_listener/outputs.tf b/examples/iot/iotoperations_broker_listener/outputs.tf new file mode 100644 index 000000000000..566ce73d6521 --- /dev/null +++ b/examples/iot/iotoperations_broker_listener/outputs.tf @@ -0,0 +1,11 @@ +output "resource_group_id" { + description = "The ID of the resource group" + value = azurerm_resource_group.example.id +} + + +output "broker_id" { + description = "The ID of the IoT Operations broker" + value = azurerm_iotoperations_broker.example.id +} + diff --git a/examples/iot/iotoperations_broker_listener/terraform.tfvars.example b/examples/iot/iotoperations_broker_listener/terraform.tfvars.example new file mode 100644 index 000000000000..5cf8dba56b8f --- /dev/null +++ 
b/examples/iot/iotoperations_broker_listener/terraform.tfvars.example @@ -0,0 +1,38 @@ +# Example values for the IoT Operations Broker Listener deployment +# Copy this file to terraform.tfvars and modify the values as needed + +resource_group_name = "rg-iotoperations-broker-listener" +location = "East US 2" +instance_name = "my-iotops-instance" +broker_name = "my-broker" +listener_name = "mqtt-listener" + +# Listener configuration +listener_port = 1883 +service_name = "mqtt-service" +service_type = "LoadBalancer" + +# TLS configuration (optional) +enable_tls = true +tls_mode = "Automatic" +tls_cert_duration = "8760h" # 1 year +tls_issuer_name = "letsencrypt-prod" +tls_issuer_kind = "ClusterIssuer" +tls_issuer_group = "cert-manager.io" +tls_cert_renew_before = "720h" # 30 days +tls_cert_secret_name = "mqtt-listener-tls" +tls_subject_organization = "My Organization" +tls_subject_organizational_unit = "IoT Division" + +# Authentication and Authorization references (optional) +# Uncomment and set these if you have authentication/authorization resources +# authentication_ref_name = "my-auth-policy" +# authorization_ref_name = "my-authz-policy" + +# Tags +tags = { + Environment = "Production" + Project = "IoT Operations" + Owner = "IoT Team" + Purpose = "MQTT Broker Listener" +} \ No newline at end of file diff --git a/examples/iot/iotoperations_broker_listener/variables.tf b/examples/iot/iotoperations_broker_listener/variables.tf new file mode 100644 index 000000000000..ce916f9d8d29 --- /dev/null +++ b/examples/iot/iotoperations_broker_listener/variables.tf @@ -0,0 +1,220 @@ +# Required for azurerm_iotoperations_instance +variable "schema_registry_ref" { + description = "The resource ID of the schema registry to associate with the instance." 
+ type = string +} +variable "resource_group_name" { + description = "The name of the resource group" + type = string + default = "rg-iotoperations-example" +} + +variable "location" { + description = "The Azure region where resources will be created" + type = string + default = "East US 2" +} + +variable "instance_name" { + description = "The name of the IoT Operations instance" + type = string + default = "iotops-instance-example" +} + +variable "custom_location_id" { + description = "The resource ID of the custom location (Arc-enabled Kubernetes cluster)" + type = string +} + +variable "broker_name" { + description = "The name of the IoT Operations broker" + type = string + default = "broker-example" +} + +# Simple Listener Variables +variable "simple_listener_name" { + description = "The name of the simple broker listener" + type = string + default = "simple-listener" +} + +variable "simple_listener_port" { + description = "The port number for the simple broker listener" + type = number + default = 1883 +} + +variable "simple_service_name" { + description = "The service name for the simple broker listener" + type = string + default = "simple-listener-service" +} + +variable "simple_service_type" { + description = "The service type for the simple broker listener" + type = string + default = "ClusterIP" +} + +# Complex Listener Variables +variable "complex_listener_name" { + description = "The name of the complex broker listener" + type = string + default = "complex-listener" +} + +variable "complex_listener_port" { + description = "The port number for the complex broker listener" + type = number + default = 8883 +} + +variable "complex_node_port" { + description = "The node port for the complex broker listener" + type = number + default = 30883 +} + +variable "complex_protocol" { + description = "The protocol for the complex broker listener" + type = string + default = "Mqtt" +} + +variable "complex_service_name" { + description = "The service name for the 
complex broker listener" + type = string + default = "complex-listener-service" +} + +variable "complex_service_type" { + description = "The service type for the complex broker listener" + type = string + default = "LoadBalancer" +} + +variable "complex_authentication_ref" { + description = "Authentication reference for the complex broker listener" + type = string + default = "" +} + +variable "complex_authorization_ref" { + description = "Authorization reference for the complex broker listener" + type = string + default = "" +} + +# Complex TLS Variables +variable "complex_tls_mode" { + description = "TLS mode for the complex broker listener" + type = string + default = "Automatic" +} + +variable "complex_tls_cert_duration" { + description = "TLS certificate duration for complex listener" + type = string + default = "8760h" +} + +variable "complex_tls_cert_secret_name" { + description = "TLS certificate secret name for complex listener" + type = string + default = "complex-tls-secret" +} + +variable "complex_tls_cert_renew_before" { + description = "TLS certificate renew before duration for complex listener" + type = string + default = "720h" +} + +variable "complex_tls_issuer_name" { + description = "TLS certificate issuer name for complex listener" + type = string + default = "cluster-issuer" +} + +variable "complex_tls_issuer_kind" { + description = "TLS certificate issuer kind for complex listener" + type = string + default = "ClusterIssuer" +} + +variable "complex_tls_issuer_group" { + description = "TLS certificate issuer group for complex listener" + type = string + default = "cert-manager.io" +} + +variable "complex_tls_private_key_algorithm" { + description = "TLS private key algorithm for complex listener" + type = string + default = "RSA" +} + +variable "complex_tls_private_key_rotation_policy" { + description = "TLS private key rotation policy for complex listener" + type = string + default = "Always" +} + +variable "complex_tls_san_dns" { + 
description = "TLS SAN DNS names for complex listener" + type = list(string) + default = ["broker.example.com"] +} + +variable "complex_tls_san_ip" { + description = "TLS SAN IP addresses for complex listener" + type = list(string) + default = ["10.0.0.1"] +} + +# Full Listener Variables +variable "enable_full_listener" { + description = "Enable the full/advanced broker listener" + type = bool + default = false +} + +variable "full_listener_name" { + description = "The name of the full broker listener" + type = string + default = "full-listener" +} + +variable "full_service_name" { + description = "The service name for the full broker listener" + type = string + default = "full-listener-service" +} + +variable "full_service_type" { + description = "The service type for the full broker listener" + type = string + default = "LoadBalancer" +} + +variable "full_authentication_ref" { + description = "Authentication reference for the full broker listener" + type = string + default = "" +} + +variable "full_authorization_ref" { + description = "Authorization reference for the full broker listener" + type = string + default = "" +} + +variable "tags" { + description = "A mapping of tags to assign to the resources" + type = map(string) + default = { + Environment = "Example" + Purpose = "IoT Operations Broker Listener Demo" + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow/README.md b/examples/iot/iotoperations_dataflow/README.md new file mode 100644 index 000000000000..81ba81643ea9 --- /dev/null +++ b/examples/iot/iotoperations_dataflow/README.md @@ -0,0 +1,179 @@ +# IoT Operations Dataflow Example + +This example demonstrates how to create an Azure IoT Operations Dataflow using Terraform. 
+ +## Overview + +This Terraform configuration creates: +- A Resource Group +- An IoT Operations Instance +- An IoT Operations Dataflow Profile +- An IoT Operations Dataflow with sources, destinations, transformations, and operations + +## Prerequisites + +- Azure subscription +- Terraform installed +- Azure CLI installed and authenticated +- An Azure Kubernetes Service (AKS) cluster for IoT Operations deployment +- IoT Operations endpoints (MQTT broker, Azure Data Explorer, etc.) configured + +## Usage + +1. Clone this repository and navigate to this example directory: + ```bash + cd examples/iot/iotoperations_dataflow + ``` + +2. Copy the example variables file: + ```bash + cp terraform.tfvars.example terraform.tfvars + ``` + +3. Edit `terraform.tfvars` with your desired values: + ```hcl + resource_group_name = "my-iotops-rg" + location = "East US 2" + instance_name = "my-iotops-instance" + dataflow_profile_name = "my-dataflow-profile" + dataflow_name = "temperature-processing-flow" + ``` + +4. Initialize Terraform: + ```bash + terraform init + ``` + +5. Plan the deployment: + ```bash + terraform plan + ``` + +6. Apply the configuration: + ```bash + terraform apply + ``` + +## Configuration + +### Dataflow Profile + +The dataflow profile manages the compute resources for dataflow operations: + +- **Instance Count**: Number of dataflow instances to run +- **Log Level**: Logging verbosity (trace, debug, info, warn, error) +- **Metrics**: Prometheus metrics configuration + +### Dataflow + +The dataflow defines the data processing pipeline with these components: + +#### Sources +Data input endpoints that can include: +- MQTT topics from broker endpoints +- Asset references for structured data +- Schema references for data validation +- Serialization format (JSON, Avro, etc.) 
+ +#### Destinations +Data output endpoints such as: +- Azure Data Explorer (ADX) +- Azure Blob Storage +- Event Hubs +- Custom endpoints + +#### Transformations +Data processing operations: +- **Filter**: Conditional logic to filter data +- **Map**: Transform data structure and add computed fields +- **Aggregate**: Group and summarize data + +#### Operations +High-level data flow operations that connect sources to destinations through transformations. + +## Resource Hierarchy + +The IoT Operations resources follow this hierarchy: +1. **Instance** - Top-level IoT Operations instance +2. **Dataflow Profile** - Compute resource manager for dataflows +3. **Dataflow** - Data processing pipeline definition + +## Example Data Flow + +This example creates a data flow that: +1. Reads temperature and humidity data from MQTT topics +2. Filters data based on temperature and humidity thresholds +3. Transforms the data by adding metadata and computed fields +4. Writes processed data to Azure Data Explorer +5. 
Archives raw data to Blob Storage + +## Advanced Configuration + +### Multiple Sources and Destinations + +You can configure multiple data sources and destinations: + +```hcl +dataflow_sources = [ + { + name = "sensor-data" + endpoint_ref = "mqtt-endpoint" + asset_ref = "temperature-asset" + }, + { + name = "machine-data" + endpoint_ref = "opcua-endpoint" + asset_ref = "pressure-asset" + } +] +``` + +### Complex Transformations + +Build sophisticated data transformations: + +```hcl +dataflow_transformations = [ + { + type = "filter" + filter = { + expression = "temperature > 20 && status == 'active'" + type = "condition" + } + }, + { + type = "map" + map = { + expression = "{ temp_f: temperature * 9/5 + 32, alert: temperature > 30 }" + type = "newProperties" + } + } +] +``` + +## Outputs + +This configuration provides the following outputs: +- Resource Group ID +- IoT Operations Instance ID +- Dataflow Profile ID and configuration +- Dataflow ID and configuration details + +## Clean Up + +To destroy the resources: +```bash +terraform destroy +``` + +## Notes + +- Ensure your IoT Operations instance is properly configured with the necessary endpoints +- Schema references must point to valid schema registry entries +- Endpoint references must match configured IoT Operations endpoints +- Transformation expressions use JSONPath and mathematical operations +- Monitor dataflow performance through Prometheus metrics + +## More Information + +For more details about Azure IoT Operations Dataflows, visit the [official documentation](https://docs.microsoft.com/azure/iot-operations/process-data/). 
\ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow/main.tf b/examples/iot/iotoperations_dataflow/main.tf new file mode 100644 index 000000000000..e57bfcf638a3 --- /dev/null +++ b/examples/iot/iotoperations_dataflow/main.tf @@ -0,0 +1,98 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} + +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "example" { + name = var.resource_group_name + location = var.location +} + +resource "azurerm_iotoperations_instance" "example" { + name = var.instance_name + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + + extended_location_name = var.custom_location_id + extended_location_type = "CustomLocation" + + schema_registry_ref = var.schema_registry_ref + + tags = var.tags +} + +resource "azurerm_iotoperations_dataflow_profile" "example" { + name = var.dataflow_profile_name + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + instance_count = var.dataflow_profile_instance_count + + diagnostics { + logs { + level = var.dataflow_profile_log_level + } + metrics { + prometheus_port = var.dataflow_profile_prometheus_port + } + } +} + +resource "azurerm_iotoperations_dataflow" "example" { + name = var.dataflow_name + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + mode = var.dataflow_mode + + operations { + name = "operation-transform" + operation_type = "BuiltInTransformation" + + source_settings { + data_sources = 
["source-mqtt"] + asset_ref = "temperature-asset" + endpoint_ref = "mqtt-endpoint" + schema_ref = "temperature-schema" + serialization_format = "Json" + } + + destination_settings { + data_destination = "destination-adx" + endpoint_ref = "adx-endpoint" + } + + built_in_transformation_settings { + datasets { + key = "dataset1" + inputs = ["input1"] + } + filter { + expression = "temperature > 20" + inputs = ["input1"] + } + map { + output = "output1" + inputs = ["input1"] + } + } + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow/outputs.tf b/examples/iot/iotoperations_dataflow/outputs.tf new file mode 100644 index 000000000000..24b0bedf18af --- /dev/null +++ b/examples/iot/iotoperations_dataflow/outputs.tf @@ -0,0 +1,35 @@ +output "resource_group_id" { + description = "The ID of the resource group" + value = azurerm_resource_group.example.id +} + +output "instance_id" { + description = "The ID of the IoT Operations instance" + value = azurerm_iotoperations_instance.example.id +} + +output "dataflow_profile_id" { + description = "The ID of the IoT Operations dataflow profile" + value = azurerm_iotoperations_dataflow_profile.example.id +} + +output "dataflow_profile_name" { + description = "The name of the IoT Operations dataflow profile" + value = azurerm_iotoperations_dataflow_profile.example.name +} + +output "dataflow_id" { + description = "The ID of the IoT Operations dataflow" + value = azurerm_iotoperations_dataflow.example.id +} + +output "dataflow_name" { + description = "The name of the IoT Operations dataflow" + value = azurerm_iotoperations_dataflow.example.name +} + +output "dataflow_mode" { + description = "The mode of the IoT Operations dataflow" + value = azurerm_iotoperations_dataflow.example.mode +} + diff --git a/examples/iot/iotoperations_dataflow/terraform.tfvars.example b/examples/iot/iotoperations_dataflow/terraform.tfvars.example new file mode 100644 index 000000000000..db1491882e49 --- /dev/null +++ 
b/examples/iot/iotoperations_dataflow/terraform.tfvars.example @@ -0,0 +1,107 @@ +# Example values for the IoT Operations Dataflow deployment +# Copy this file to terraform.tfvars and modify the values as needed + +resource_group_name = "rg-iotoperations-dataflow" +location = "East US 2" +instance_name = "my-iotops-instance" +dataflow_profile_name = "my-dataflow-profile" + +# Dataflow Profile Configuration +dataflow_profile_instance_count = 2 +dataflow_profile_log_level = "info" +dataflow_profile_prometheus_port = 9090 + +# Dataflow Configuration +dataflow_name = "temperature-processing-flow" +dataflow_mode = "Enabled" + +# Dataflow Sources +dataflow_sources = [ + { + name = "mqtt-temperature-source" + endpoint_ref = "mqtt-broker-endpoint" + asset_ref = "temperature-sensor-asset" + schema_ref = "temperature-schema" + serialization = { + format = "Json" + } + }, + { + name = "mqtt-humidity-source" + endpoint_ref = "mqtt-broker-endpoint" + asset_ref = "humidity-sensor-asset" + schema_ref = "humidity-schema" + serialization = { + format = "Json" + } + } +] + +# Dataflow Destinations +dataflow_destinations = [ + { + name = "adx-processed-data" + endpoint_ref = "azure-data-explorer-endpoint" + schema_ref = "processed-telemetry-schema" + serialization = { + format = "Json" + } + }, + { + name = "storage-raw-data" + endpoint_ref = "blob-storage-endpoint" + serialization = { + format = "Parquet" + } + } +] + +# Dataflow Transformations +dataflow_transformations = [ + { + type = "filter" + filter = { + expression = "temperature > 15 && humidity < 80" + type = "condition" + } + }, + { + type = "map" + map = { + expression = "{ temperature_celsius: temperature, humidity_percent: humidity, processed_at: $metadata.timestamp, device_id: $metadata.deviceId }" + type = "newProperties" + } + } +] + +# Dataflow Operations +dataflow_operations = [ + { + name = "process-temperature-data" + operation_type = "source" + source_name = "mqtt-temperature-source" + destination_name = 
"adx-processed-data" + built_in_transformation = { + serialize_type = "Json" + schema_ref = "processed-telemetry-schema" + } + }, + { + name = "archive-raw-data" + operation_type = "source" + source_name = "mqtt-humidity-source" + destination_name = "storage-raw-data" + built_in_transformation = { + serialize_type = "Parquet" + schema_ref = "raw-telemetry-schema" + } + } +] + +# Tags +tags = { + Environment = "Production" + Project = "IoT Operations" + Owner = "Data Engineering Team" + Purpose = "Temperature and Humidity Data Processing" +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow/variables.tf b/examples/iot/iotoperations_dataflow/variables.tf new file mode 100644 index 000000000000..75498954d8b7 --- /dev/null +++ b/examples/iot/iotoperations_dataflow/variables.tf @@ -0,0 +1,181 @@ +variable "resource_group_name" { + description = "The name of the resource group" + type = string + default = "rg-iotoperations-dataflow" +} + +variable "location" { + description = "The Azure region where resources will be created" + type = string + default = "East US 2" +} + +variable "instance_name" { + description = "The name of the IoT Operations instance" + type = string + default = "iotops-instance-dataflow" +} + +variable "custom_location_id" { + description = "The resource ID of the custom location (Arc-enabled Kubernetes cluster)" + type = string +} + +variable "tags" { + description = "A mapping of tags to assign to the resources" + type = map(string) + default = {} +} + +variable "dataflow_profile_name" { + description = "The name of the dataflow profile" + type = string + default = "dataflow-profile-example" +} + +variable "dataflow_profile_instance_count" { + description = "The number of dataflow profile instances" + type = number + default = 1 +} + +variable "dataflow_profile_log_level" { + description = "The log level for the dataflow profile" + type = string + default = "info" + validation { + condition = contains(["trace", "debug", 
"info", "warn", "error"], var.dataflow_profile_log_level) + error_message = "Log level must be one of: trace, debug, info, warn, error." + } +} + +variable "dataflow_profile_prometheus_port" { + description = "The Prometheus metrics port for the dataflow profile" + type = number + default = 9090 +} + +variable "dataflow_name" { + description = "The name of the dataflow" + type = string + default = "dataflow-example" +} + +variable "dataflow_mode" { + description = "The mode of the dataflow (Enabled, Disabled)" + type = string + default = "Enabled" + validation { + condition = contains(["Enabled", "Disabled"], var.dataflow_mode) + error_message = "Dataflow mode must be either 'Enabled' or 'Disabled'." + } +} + +variable "dataflow_sources" { + description = "List of dataflow sources" + type = list(object({ + name = string + endpoint_ref = string + asset_ref = optional(string) + schema_ref = optional(string) + serialization = optional(object({ + format = string + })) + })) + default = [ + { + name = "source-mqtt" + endpoint_ref = "mqtt-endpoint" + asset_ref = "temperature-asset" + schema_ref = "temperature-schema" + serialization = { + format = "Json" + } + } + ] +} + +variable "dataflow_destinations" { + description = "List of dataflow destinations" + type = list(object({ + name = string + endpoint_ref = string + schema_ref = optional(string) + serialization = optional(object({ + format = string + })) + })) + default = [ + { + name = "destination-adx" + endpoint_ref = "adx-endpoint" + schema_ref = "processed-schema" + serialization = { + format = "Json" + } + } + ] +} + +variable "dataflow_transformations" { + description = "List of dataflow transformations" + type = list(object({ + type = string + filter = optional(object({ + expression = string + type = string + })) + map = optional(object({ + expression = string + type = string + })) + })) + default = [ + { + type = "filter" + filter = { + expression = "temperature > 20" + type = "condition" + } + }, + { + type = 
"map" + map = { + expression = "{ temp_celsius: temperature, timestamp: $metadata.timestamp }" + type = "newProperties" + } + } + ] +} + +variable "dataflow_operations" { + description = "List of dataflow operations" + type = list(object({ + name = string + operation_type = string + source_name = string + destination_name = string + built_in_transformation = optional(object({ + serialize_type = string + schema_ref = string + })) + })) + default = [ + { + name = "operation-transform" + operation_type = "source" + source_name = "source-mqtt" + destination_name = "destination-adx" + built_in_transformation = { + serialize_type = "Json" + schema_ref = "processed-schema" + } + } + ] +} + +variable "schema_registry_ref" { + description = "The resource ID of the schema registry to associate with the instance." + type = string +} + diff --git a/examples/iot/iotoperations_dataflow_endpoint/README.md b/examples/iot/iotoperations_dataflow_endpoint/README.md new file mode 100644 index 000000000000..d70764a346d3 --- /dev/null +++ b/examples/iot/iotoperations_dataflow_endpoint/README.md @@ -0,0 +1,263 @@ +# IoT Operations Dataflow Endpoints Example + +This example demonstrates how to create various Azure IoT Operations Dataflow Endpoints using Terraform. 
+ +## Overview + +This Terraform configuration creates: +- A Resource Group +- An IoT Operations Instance +- Multiple Dataflow Endpoints for different data sources and destinations: + - **MQTT Endpoint** - For IoT device data ingestion + - **Azure Data Explorer (ADX) Endpoint** - For real-time analytics + - **Azure Blob Storage Endpoint** - For data archival + - **Local Storage Endpoint** - For temporary buffering + - **Fabric OneLake Endpoint** - For Microsoft Fabric analytics (optional) + +## Prerequisites + +- Azure subscription +- Terraform installed +- Azure CLI installed and authenticated +- An Azure Kubernetes Service (AKS) cluster for IoT Operations deployment +- Required Azure services configured: + - Azure Data Explorer cluster (for ADX endpoint) + - Azure Storage Account (for blob storage endpoint) + - Microsoft Fabric workspace (for Fabric endpoint, optional) + +## Usage + +1. Clone this repository and navigate to this example directory: + ```bash + cd examples/iot/iotoperations_dataflow_endpoint + ``` + +2. Copy the example variables file: + ```bash + cp terraform.tfvars.example terraform.tfvars + ``` + +3. Edit `terraform.tfvars` with your specific values: + ```hcl + resource_group_name = "my-iotops-rg" + location = "East US 2" + instance_name = "my-iotops-instance" + + # Update endpoints with your actual service URLs + adx_cluster_uri = "https://myiotcluster.eastus2.kusto.windows.net" + storage_account_host = "https://myiotdatalake.blob.core.windows.net" + ``` + +4. Initialize Terraform: + ```bash + terraform init + ``` + +5. Plan the deployment: + ```bash + terraform plan + ``` + +6. 
Apply the configuration: + ```bash + terraform apply + ``` + +## Endpoint Types and Configuration + +### MQTT Endpoint + +The MQTT endpoint is used for ingesting data from IoT devices via MQTT protocol: + +```hcl +endpoint_type = "Mqtt" +mqtt_settings { + host = "mqtt-broker.iotoperations.svc.cluster.local" + port = 1883 + + authentication { + method = "UsernamePassword" + username = "iot-client" + password_secret_name = "mqtt-credentials" + } + + tls { + mode = "Enabled" + trusted_ca_certificate_config_map = "mqtt-ca-cert" + } +} +``` + +**Features:** +- Username/password authentication +- TLS encryption support +- Configurable QoS levels +- Keep-alive and session management +- Message retention options + +### Azure Data Explorer (ADX) Endpoint + +The ADX endpoint sends processed data to Azure Data Explorer for real-time analytics: + +```hcl +endpoint_type = "DataExplorer" +data_explorer_settings { + host = "https://mycluster.eastus2.kusto.windows.net" + database = "iottelemetry" + + authentication { + method = "SystemAssignedManagedIdentity" + system_assigned_managed_identity_audience = "https://kusto.windows.net" + } + + batching { + latency_seconds = 5 + max_messages = 1000 + } +} +``` + +**Features:** +- Managed Identity authentication +- Configurable batching for performance +- Direct integration with KQL queries +- Real-time data ingestion + +### Azure Blob Storage Endpoint + +The storage endpoint archives data to Azure Blob Storage: + +```hcl +endpoint_type = "DataLakeStorage" +data_lake_storage_settings { + host = "https://mystorageaccount.blob.core.windows.net" + container_name = "iotdata" + + authentication { + method = "SystemAssignedManagedIdentity" + system_assigned_managed_identity_audience = "https://storage.azure.com" + } + + batching { + latency_seconds = 60 + max_messages = 10000 + } +} +``` + +**Features:** +- Long-term data archival +- Large batch processing +- Cost-effective storage +- Integration with Azure Data Lake + +### Local Storage 
Endpoint + +The local storage endpoint provides temporary data buffering: + +```hcl +endpoint_type = "LocalStorage" +local_storage_settings { + persistent_volume_claim_ref = "iot-local-storage" +} +``` + +**Features:** +- High-speed local caching +- Edge computing scenarios +- Kubernetes PVC integration +- Data resilience during network outages + +### Fabric OneLake Endpoint (Optional) + +The Fabric endpoint integrates with Microsoft Fabric for advanced analytics: + +```hcl +endpoint_type = "FabricOneLake" +fabric_one_lake_settings { + host = "https://onelake.dfs.fabric.microsoft.com" + workspace_id = "12345678-1234-5678-9abc-123456789012" + lakehouse_name = "iotlakehouse" + + authentication { + method = "SystemAssignedManagedIdentity" + system_assigned_managed_identity_audience = "https://onelake.dfs.fabric.microsoft.com" + } +} +``` + +**Features:** +- Microsoft Fabric integration +- Advanced analytics and AI capabilities +- Delta Lake format support +- Power BI integration + +## Authentication and Security + +All endpoints support Managed Identity authentication for secure, passwordless connections: + +- **System Assigned Managed Identity**: Automatically created and managed by Azure +- **Audience-specific authentication**: Each service has its specific audience URL +- **TLS encryption**: Secure data transmission +- **Secret management**: Kubernetes secrets for sensitive data + +## Batching Configuration + +Endpoints support batching for optimal performance: + +- **Latency**: Maximum time to wait before sending a batch +- **Max Messages**: Maximum number of messages per batch +- **Performance tuning**: Balance between latency and throughput + +## Resource Hierarchy + +The IoT Operations resources follow this hierarchy: +1. **Instance** - Top-level IoT Operations instance +2. **Dataflow Endpoints** - Data source and destination definitions +3. 
**Dataflows** - Processing pipelines that connect endpoints + +## Example Data Flow Architecture + +``` +IoT Devices → MQTT Endpoint → Dataflow Processing → { + ├── ADX Endpoint (Real-time analytics) + ├── Storage Endpoint (Long-term archive) + ├── Local Endpoint (Edge caching) + └── Fabric Endpoint (Advanced analytics) + } +``` + +## Outputs + +This configuration provides comprehensive outputs including: +- Individual endpoint IDs and names +- Endpoint configuration summaries +- Connection details for integration with dataflows + +## Clean Up + +To destroy the resources: +```bash +terraform destroy +``` + +## Notes + +- Ensure all referenced Azure services exist before deployment +- Managed Identity requires appropriate RBAC permissions on target services +- Test endpoint connectivity before creating dataflows +- Monitor endpoint performance and adjust batching settings as needed +- Local storage requires properly configured Kubernetes PVCs + +## Troubleshooting + +Common issues and solutions: + +1. **Authentication failures**: Verify Managed Identity permissions +2. **Connection timeouts**: Check network connectivity and firewall rules +3. **Batching issues**: Adjust latency and message count settings +4. **Storage full**: Monitor local storage usage and implement cleanup policies + +## More Information + +For more details about Azure IoT Operations Dataflow Endpoints, visit the [official documentation](https://docs.microsoft.com/azure/iot-operations/connect-to-cloud/). 
\ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow_endpoint/main.tf b/examples/iot/iotoperations_dataflow_endpoint/main.tf new file mode 100644 index 000000000000..b158dd8439c1 --- /dev/null +++ b/examples/iot/iotoperations_dataflow_endpoint/main.tf @@ -0,0 +1,134 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} + +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "example" { + name = var.resource_group_name + location = var.location +} + +resource "azurerm_iotoperations_instance" "example" { + name = var.instance_name + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + + schema_registry_ref = var.schema_registry_ref +} + +# MQTT Endpoint for IoT data ingestion +resource "azurerm_iotoperations_dataflow_endpoint" "mqtt" { + name = "mqtt-endpoint" + resource_group_name = var.resource_group_name + instance_name = var.instance_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + endpoint_type = "Mqtt" + + mqtt_settings { + host = var.mqtt_host + + authentication { + method = "SystemAssignedManagedIdentity" + } + } +} + +# Azure Data Explorer Endpoint for processed data +resource "azurerm_iotoperations_dataflow_endpoint" "adx" { + name = "adx-endpoint" + resource_group_name = var.resource_group_name + instance_name = var.instance_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + endpoint_type = "DataExplorer" + + data_explorer_settings { + host = var.adx_host + database = var.adx_database + + authentication { + method = "SystemAssignedManagedIdentity" + } + } +} + +# Azure Blob Storage Endpoint for data archival +resource "azurerm_iotoperations_dataflow_endpoint" "storage" { + name = "storage-endpoint" + resource_group_name = var.resource_group_name + instance_name = var.instance_name + + extended_location { + name = 
var.custom_location_id + type = "CustomLocation" + } + + endpoint_type = "DataLakeStorage" + + data_lake_storage_settings { + host = var.storage_host + + authentication { + method = "SystemAssignedManagedIdentity" + } + } +} + +# Local Storage Endpoint for temporary data +resource "azurerm_iotoperations_dataflow_endpoint" "local" { + name = "local-endpoint" + resource_group_name = var.resource_group_name + instance_name = var.instance_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + endpoint_type = "LocalStorage" + + local_storage_settings { + path = "/mnt/data" + } +} + +# Fabric OneLake Endpoint for analytics +resource "azurerm_iotoperations_dataflow_endpoint" "fabric" { + name = "fabric-endpoint" + resource_group_name = var.resource_group_name + instance_name = var.instance_name + + extended_location { + name = var.custom_location_id + type = "CustomLocation" + } + + endpoint_type = "FabricOneLake" + + fabric_one_lake_settings { + host = var.fabric_host + names = [var.fabric_lakehouse_name, var.fabric_workspace_name] + one_lake_path_type = var.fabric_one_lake_path_type + workspace = var.fabric_workspace_name + + authentication { + method = "SystemAssignedManagedIdentity" + } + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow_endpoint/outputs.tf b/examples/iot/iotoperations_dataflow_endpoint/outputs.tf new file mode 100644 index 000000000000..5d92130ce37c --- /dev/null +++ b/examples/iot/iotoperations_dataflow_endpoint/outputs.tf @@ -0,0 +1,98 @@ +output "resource_group_id" { + description = "The ID of the resource group" + value = azurerm_resource_group.example.id +} + +output "instance_id" { + description = "The ID of the IoT Operations instance" + value = azurerm_iotoperations_instance.example.id +} + +# MQTT Endpoint Outputs +output "mqtt_endpoint_id" { + description = "The ID of the MQTT dataflow endpoint" + value = azurerm_iotoperations_dataflow_endpoint.mqtt.id +} + +output 
"mqtt_endpoint_name" {
  description = "The name of the MQTT dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.mqtt.name
}

# Azure Data Explorer Endpoint Outputs
output "adx_endpoint_id" {
  description = "The ID of the Azure Data Explorer dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.adx.id
}

output "adx_endpoint_name" {
  description = "The name of the Azure Data Explorer dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.adx.name
}

# Azure Storage Endpoint Outputs
output "storage_endpoint_id" {
  description = "The ID of the Azure Storage dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.storage.id
}

output "storage_endpoint_name" {
  description = "The name of the Azure Storage dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.storage.name
}

# Local Storage Endpoint Outputs
output "local_endpoint_id" {
  description = "The ID of the local storage dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.local.id
}

output "local_endpoint_name" {
  description = "The name of the local storage dataflow endpoint"
  value       = azurerm_iotoperations_dataflow_endpoint.local.name
}

# Fabric OneLake Endpoint Outputs
# The fabric endpoint resource is created unconditionally in main.tf, so these
# outputs must not be guarded by var.enable_fabric_endpoint: with the guard in
# place they reported null while the endpoint actually existed.
output "fabric_endpoint_id" {
  description = "The ID of the Fabric OneLake dataflow endpoint."
  value       = azurerm_iotoperations_dataflow_endpoint.fabric.id
}

output "fabric_endpoint_name" {
  description = "The name of the Fabric OneLake dataflow endpoint."
  value       = azurerm_iotoperations_dataflow_endpoint.fabric.name
}

# Endpoint Configuration Summary.
# Only values that main.tf actually deploys are echoed here; the previous
# version reported variables (mqtt_port, adx_cluster_uri, adx_database_name,
# storage_account_host, storage_container_name, local_storage_pvc_name) that
# no resource in this example consumes.
output "endpoint_summary" {
  description = "Summary of all configured dataflow endpoints"
  value = {
    mqtt_endpoint = {
      id   = azurerm_iotoperations_dataflow_endpoint.mqtt.id
      name = azurerm_iotoperations_dataflow_endpoint.mqtt.name
      type = azurerm_iotoperations_dataflow_endpoint.mqtt.endpoint_type
      host = var.mqtt_host
    }
    adx_endpoint = {
      id       = azurerm_iotoperations_dataflow_endpoint.adx.id
      name     = azurerm_iotoperations_dataflow_endpoint.adx.name
      type     = azurerm_iotoperations_dataflow_endpoint.adx.endpoint_type
      host     = var.adx_host
      database = var.adx_database
    }
    storage_endpoint = {
      id   = azurerm_iotoperations_dataflow_endpoint.storage.id
      name = azurerm_iotoperations_dataflow_endpoint.storage.name
      type = azurerm_iotoperations_dataflow_endpoint.storage.endpoint_type
      host = var.storage_host
    }
    local_endpoint = {
      id   = azurerm_iotoperations_dataflow_endpoint.local.id
      name = azurerm_iotoperations_dataflow_endpoint.local.name
      type = azurerm_iotoperations_dataflow_endpoint.local.endpoint_type
      path = "/mnt/data"
    }
    fabric_endpoint = {
      id        = azurerm_iotoperations_dataflow_endpoint.fabric.id
      name      = azurerm_iotoperations_dataflow_endpoint.fabric.name
      type      = azurerm_iotoperations_dataflow_endpoint.fabric.endpoint_type
      host      = var.fabric_host
      workspace = var.fabric_workspace_name
      lakehouse = var.fabric_lakehouse_name
    }
  }
}

# --- terraform.tfvars.example (head) ---
# Example values for the IoT Operations Dataflow Endpoints deployment
# Copy this file to terraform.tfvars and modify the values as needed

resource_group_name = "rg-iotoperations-endpoints"
location            = "East US 2"
instance_name       = "my-iotops-instance"

# MQTT Endpoint Configuration
mqtt_endpoint_name = "mqtt-telemetry-source"
mqtt_host =
"mqtt-broker.iotoperations.svc.cluster.local" +mqtt_port = 8883 # Use 8883 for TLS, 1883 for non-TLS + +# Enable MQTT Authentication +mqtt_authentication_enabled = true +mqtt_auth_method = "UsernamePassword" +mqtt_username = "iot-client" +mqtt_password_secret = "mqtt-credentials" + +# Enable MQTT TLS +mqtt_tls_enabled = true +mqtt_tls_mode = "Enabled" +mqtt_tls_ca_cert_config_map = "mqtt-ca-cert" + +# MQTT Connection Settings +mqtt_keep_alive_seconds = 60 +mqtt_retain = false +mqtt_session_expiry_seconds = 3600 +mqtt_qos = 1 + +# Azure Data Explorer Endpoint Configuration +adx_endpoint_name = "adx-analytics-sink" +adx_cluster_uri = "https://myiotcluster.eastus2.kusto.windows.net" +adx_database_name = "iottelemetrydb" +adx_audience = "https://kusto.windows.net" +adx_batching_latency = 5 +adx_batching_max_messages = 1000 + +# Azure Storage Endpoint Configuration +storage_endpoint_name = "blob-archive-sink" +storage_account_host = "https://myiotdatalake.blob.core.windows.net" +storage_container_name = "iot-raw-data" +storage_audience = "https://storage.azure.com" +storage_batching_latency = 60 +storage_batching_max_messages = 10000 + +# Local Storage Endpoint Configuration +local_endpoint_name = "local-buffer" +local_storage_pvc_name = "iot-local-storage-pvc" + +# Fabric OneLake Endpoint Configuration (Optional) +enable_fabric_endpoint = true +fabric_endpoint_name = "fabric-analytics-sink" +fabric_host = "https://onelake.dfs.fabric.microsoft.com" +fabric_workspace_id = "12345678-1234-5678-9abc-123456789012" +fabric_lakehouse_name = "iot-analytics-lakehouse" +fabric_audience = "https://onelake.dfs.fabric.microsoft.com" +fabric_batching_latency = 30 +fabric_batching_max_messages = 5000 + +# Tags +tags = { + Environment = "Production" + Project = "IoT Operations" + Owner = "Data Engineering Team" + Purpose = "IoT Data Ingestion and Processing Endpoints" + CostCenter = "Engineering" +} \ No newline at end of file diff --git 
a/examples/iot/iotoperations_dataflow_endpoint/variables.tf b/examples/iot/iotoperations_dataflow_endpoint/variables.tf new file mode 100644 index 000000000000..e12803b157a5 --- /dev/null +++ b/examples/iot/iotoperations_dataflow_endpoint/variables.tf @@ -0,0 +1,286 @@ +variable "resource_group_name" { + description = "The name of the resource group" + type = string + default = "rg-iotoperations-endpoints" +} + +variable "location" { + description = "The Azure region where resources will be created" + type = string + default = "East US 2" +} + +variable "instance_name" { + description = "The name of the IoT Operations instance" + type = string + default = "iotops-instance-endpoints" +} + +# MQTT Endpoint Variables +variable "mqtt_endpoint_name" { + description = "The name of the MQTT dataflow endpoint" + type = string + default = "mqtt-endpoint" +} + +variable "mqtt_host" { + description = "The MQTT broker host" + type = string + default = "mqtt-broker.iotoperations.svc.cluster.local" +} + +variable "mqtt_port" { + description = "The MQTT broker port" + type = number + default = 1883 +} + +variable "mqtt_authentication_enabled" { + description = "Enable MQTT authentication" + type = bool + default = false +} + +variable "mqtt_auth_method" { + description = "MQTT authentication method" + type = string + default = "UsernamePassword" +} + +variable "mqtt_username" { + description = "MQTT username" + type = string + default = "" +} + +variable "mqtt_password_secret" { + description = "Name of the secret containing MQTT password" + type = string + default = "" +} + +variable "mqtt_tls_enabled" { + description = "Enable TLS for MQTT connection" + type = bool + default = false +} + +variable "mqtt_tls_mode" { + description = "TLS mode for MQTT connection" + type = string + default = "Enabled" +} + +variable "mqtt_tls_ca_cert_config_map" { + description = "ConfigMap containing CA certificate for MQTT TLS" + type = string + default = "" +} + +variable 
"mqtt_keep_alive_seconds" { + description = "MQTT keep alive interval in seconds" + type = number + default = 60 +} + +variable "mqtt_retain" { + description = "Enable MQTT message retention" + type = bool + default = false +} + +variable "mqtt_session_expiry_seconds" { + description = "MQTT session expiry in seconds" + type = number + default = 3600 +} + +variable "mqtt_qos" { + description = "MQTT Quality of Service level" + type = number + default = 1 + validation { + condition = contains([0, 1, 2], var.mqtt_qos) + error_message = "MQTT QoS must be 0, 1, or 2." + } +} + +# Azure Data Explorer Endpoint Variables +variable "adx_endpoint_name" { + description = "The name of the Azure Data Explorer dataflow endpoint" + type = string + default = "adx-endpoint" +} + +variable "adx_cluster_uri" { + description = "Azure Data Explorer cluster URI" + type = string + default = "https://mycluster.eastus2.kusto.windows.net" +} + +variable "adx_database_name" { + description = "Azure Data Explorer database name" + type = string + default = "iottelemetry" +} + +variable "adx_audience" { + description = "Azure Data Explorer audience for authentication" + type = string + default = "https://kusto.windows.net" +} + +variable "adx_batching_latency" { + description = "ADX batching latency in seconds" + type = number + default = 5 +} + +variable "adx_batching_max_messages" { + description = "ADX batching maximum messages" + type = number + default = 1000 +} + +# Azure Storage Endpoint Variables +variable "storage_endpoint_name" { + description = "The name of the Azure Storage dataflow endpoint" + type = string + default = "storage-endpoint" +} + +variable "storage_account_host" { + description = "Azure Storage account host" + type = string + default = "https://mystorageaccount.blob.core.windows.net" +} + +variable "storage_container_name" { + description = "Azure Storage container name" + type = string + default = "iotdata" +} + +variable "storage_audience" { + description = "Azure 
Storage audience for authentication" + type = string + default = "https://storage.azure.com" +} + +variable "storage_batching_latency" { + description = "Storage batching latency in seconds" + type = number + default = 60 +} + +variable "storage_batching_max_messages" { + description = "Storage batching maximum messages" + type = number + default = 10000 +} + +# Local Storage Endpoint Variables +variable "local_endpoint_name" { + description = "The name of the local storage dataflow endpoint" + type = string + default = "local-endpoint" +} + +variable "local_storage_pvc_name" { + description = "Name of the Persistent Volume Claim for local storage" + type = string + default = "iot-local-storage" +} + +# Fabric OneLake Endpoint Variables +variable "enable_fabric_endpoint" { + description = "Enable Fabric OneLake endpoint" + type = bool + default = false +} + +variable "fabric_endpoint_name" { + description = "The name of the Fabric OneLake dataflow endpoint" + type = string + default = "fabric-endpoint" +} + +variable "fabric_host" { + description = "Fabric OneLake host" + type = string + default = "https://onelake.dfs.fabric.microsoft.com" +} + +variable "fabric_workspace_id" { + description = "Fabric workspace ID" + type = string + default = "" +} + +variable "fabric_lakehouse_name" { + description = "Fabric lakehouse name" + type = string + default = "iotlakehouse" +} + +variable "fabric_audience" { + description = "Fabric OneLake audience for authentication" + type = string + default = "https://onelake.dfs.fabric.microsoft.com" +} + +variable "fabric_batching_latency" { + description = "Fabric batching latency in seconds" + type = number + default = 30 +} + +variable "fabric_batching_max_messages" { + description = "Fabric batching maximum messages" + type = number + default = 5000 +} + +variable "tags" { + description = "A mapping of tags to assign to the resources" + type = map(string) + default = { + Environment = "Example" + Purpose = "IoT Operations 
Dataflow Endpoints Demo" + } +} + +variable "custom_location_id" { + description = "The ID of the custom location to use for extended location." + type = string +} + +variable "adx_host" { + description = "Azure Data Explorer host" + type = string +} + +variable "adx_database" { + description = "Azure Data Explorer database" + type = string +} + +variable "storage_host" { + description = "Azure Storage host" + type = string +} + +variable "fabric_workspace_name" { + description = "Fabric workspace name" + type = string +} + +variable "fabric_one_lake_path_type" { + description = "Fabric OneLake path type" + type = string +} + +variable "schema_registry_ref" { + description = "The resource ID of the schema registry." + type = string +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow_profile/README.md b/examples/iot/iotoperations_dataflow_profile/README.md new file mode 100644 index 000000000000..310d30f2cb73 --- /dev/null +++ b/examples/iot/iotoperations_dataflow_profile/README.md @@ -0,0 +1,303 @@ +# IoT Operations Dataflow Profiles Example + +This example demonstrates how to create multiple Azure IoT Operations Dataflow Profiles using Terraform for different processing scenarios. + +## Overview + +This Terraform configuration creates: +- A Resource Group +- An IoT Operations Instance +- Multiple Dataflow Profiles optimized for different use cases: + - **High Performance Profile** - For real-time, high-throughput processing + - **Standard Profile** - For regular batch processing workloads + - **Edge Profile** - For resource-constrained edge environments + - **Development Profile** - For testing and development (optional) + +## Prerequisites + +- Azure subscription +- Terraform installed +- Azure CLI installed and authenticated +- An Azure Kubernetes Service (AKS) cluster for IoT Operations deployment + +## Usage + +1. 
Clone this repository and navigate to this example directory: + ```bash + cd examples/iot/iotoperations_dataflow_profile + ``` + +2. Copy the example variables file: + ```bash + cp terraform.tfvars.example terraform.tfvars + ``` + +3. Edit `terraform.tfvars` with your desired values: + ```hcl + resource_group_name = "my-iotops-rg" + location = "East US 2" + instance_name = "my-iotops-instance" + + # Configure profiles based on your workload requirements + high_performance_instance_count = 4 + standard_instance_count = 2 + edge_instance_count = 1 + ``` + +4. Initialize Terraform: + ```bash + terraform init + ``` + +5. Plan the deployment: + ```bash + terraform plan + ``` + +6. Apply the configuration: + ```bash + terraform apply + ``` + +## Dataflow Profile Types + +### High Performance Profile + +Optimized for real-time, high-throughput data processing: + +```hcl +instance_count = 4 +diagnostics { + logs { + level = "warn" # Reduced logging for performance + } + metrics { + prometheus_port = 9090 + } + self_check { + mode = "Enabled" + interval_seconds = 30 # Frequent health checks + timeout_seconds = 15 # Quick timeout + } +} +``` + +**Use Cases:** +- Real-time analytics +- High-frequency sensor data processing +- Low-latency alerting systems +- Stream processing applications + +**Characteristics:** +- Multiple instances for horizontal scaling +- Reduced logging to minimize performance impact +- Frequent health checks for reliability +- Optimized for throughput over resource usage + +### Standard Profile + +Balanced configuration for regular batch processing: + +```hcl +instance_count = 2 +diagnostics { + logs { + level = "info" # Standard logging level + } + metrics { + prometheus_port = 9091 + } + self_check { + mode = "Enabled" + interval_seconds = 60 # Regular health checks + timeout_seconds = 30 # Standard timeout + } +} +``` + +**Use Cases:** +- Batch data processing +- ETL operations +- Scheduled data transformations +- General-purpose data flows + 
+**Characteristics:** +- Moderate instance count for balanced performance +- Standard logging for operational visibility +- Regular health checks +- Good balance of performance and resource usage + +### Edge Profile + +Optimized for resource-constrained edge environments: + +```hcl +instance_count = 1 +diagnostics { + logs { + level = "error" # Minimal logging + } + metrics { + prometheus_port = 9092 + } + self_check { + mode = "Enabled" + interval_seconds = 120 # Less frequent checks + timeout_seconds = 60 # Longer timeout + } +} +``` + +**Use Cases:** +- Edge computing scenarios +- IoT gateway processing +- Remote site data processing +- Resource-limited environments + +**Characteristics:** +- Single instance to minimize resource usage +- Error-only logging to reduce I/O +- Less frequent health checks to save resources +- Optimized for minimal resource consumption + +### Development Profile (Optional) + +Configuration for testing and development: + +```hcl +instance_count = 1 +diagnostics { + logs { + level = "debug" # Verbose logging for debugging + } + metrics { + prometheus_port = 9093 + } + self_check { + mode = "Enabled" + interval_seconds = 30 # Frequent checks for development + timeout_seconds = 15 # Quick feedback + } +} +``` + +**Use Cases:** +- Development and testing +- Debugging data flows +- Proof of concept implementations +- Learning and experimentation + +**Characteristics:** +- Single instance for simplicity +- Debug-level logging for detailed insights +- Frequent health checks for quick feedback +- Easy to enable/disable via variable + +## Configuration Options + +### Instance Count + +Controls the number of dataflow processing instances: +- **1**: Minimal resource usage, suitable for light workloads +- **2-3**: Balanced performance for moderate workloads +- **4+**: High performance for demanding workloads + +### Log Levels + +Available logging levels in order of verbosity: +- **trace**: Most verbose, includes all operations +- **debug**: 
Detailed information for debugging +- **info**: General operational information +- **warn**: Warning messages and above +- **error**: Error messages only (minimal) + +### Self-Check Configuration + +Health monitoring settings: +- **Mode**: Enable/disable health checks +- **Interval**: How often to perform health checks +- **Timeout**: Maximum time to wait for health check response + +### Metrics + +Prometheus metrics configuration: +- **Port**: Different ports for each profile to avoid conflicts +- **Endpoint**: Accessible at `http://localhost:{port}/metrics` + +## Monitoring and Observability + +Each profile exposes metrics on different ports: +- High Performance: `:9090/metrics` +- Standard: `:9091/metrics` +- Edge: `:9092/metrics` +- Development: `:9093/metrics` + +Monitor key metrics: +- Processing throughput +- Error rates +- Resource utilization +- Health check status + +## Resource Hierarchy + +The IoT Operations resources follow this hierarchy: +1. **Instance** - Top-level IoT Operations instance +2. **Dataflow Profiles** - Compute resource managers +3. **Dataflows** - Processing pipelines that use profiles +4. **Dataflow Endpoints** - Data sources and destinations + +## Profile Selection Strategy + +Choose profiles based on your requirements: + +| Requirement | Recommended Profile | +|-------------|-------------------| +| Real-time processing | High Performance | +| Batch processing | Standard | +| Edge computing | Edge | +| Development/Testing | Development | +| Mixed workloads | Multiple profiles | + +## Best Practices + +1. **Resource Planning**: Size instance counts based on expected load +2. **Monitoring**: Use different Prometheus ports for each profile +3. **Log Management**: Balance logging verbosity with performance needs +4. **Health Checks**: Adjust intervals based on environment reliability +5. 
**Environment Separation**: Use different profiles for dev/test/prod + +## Outputs + +This configuration provides comprehensive outputs including: +- Individual profile IDs and names +- Configuration summaries for all profiles +- Prometheus metrics endpoints +- Resource hierarchy information + +## Clean Up + +To destroy the resources: +```bash +terraform destroy +``` + +## Notes + +- Profiles define compute resources but don't process data by themselves +- Create dataflows that reference these profiles for actual data processing +- Monitor resource usage to optimize instance counts +- Adjust self-check intervals based on your reliability requirements +- Use appropriate log levels for your operational needs + +## Troubleshooting + +Common issues and solutions: + +1. **Resource constraints**: Reduce instance counts or use edge profile +2. **Port conflicts**: Ensure each profile uses a unique Prometheus port +3. **Performance issues**: Increase instance count or use high-performance profile +4. **Monitoring gaps**: Verify Prometheus metrics endpoints are accessible + +## More Information + +For more details about Azure IoT Operations Dataflow Profiles, visit the [official documentation](https://docs.microsoft.com/azure/iot-operations/process-data/). 
terraform {
  required_providers {
    azurerm = {
      source = "hashicorp/azurerm"
    }
  }
}

provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "example" {
  name     = var.resource_group_name
  location = var.location
}

resource "azurerm_iotoperations_instance" "example" {
  name                = var.instance_name
  resource_group_name = azurerm_resource_group.example.name
  location            = azurerm_resource_group.example.location

  extended_location_name = var.custom_location_id
  extended_location_type = "CustomLocation"

  schema_registry_ref = var.schema_registry_ref

  tags = var.tags
}

# High-performance dataflow profile for real-time processing.
# All profiles reference the resource group / instance RESOURCES rather than
# the raw input variables so Terraform orders their creation correctly.
# NOTE(review): the *_self_check_* variables declared in variables.tf are not
# wired into the diagnostics block here — confirm whether the provider schema
# supports a self_check block and add it if so.
resource "azurerm_iotoperations_dataflow_profile" "high_performance" {
  name                = var.high_performance_profile_name
  resource_group_name = azurerm_resource_group.example.name
  instance_name       = azurerm_iotoperations_instance.example.name

  extended_location {
    name = azurerm_iotoperations_instance.example.extended_location_name
    type = "CustomLocation"
  }

  instance_count = var.high_performance_instance_count

  diagnostics {
    logs {
      level = var.high_performance_log_level
    }
    metrics {
      prometheus_port = var.high_performance_prometheus_port
    }
  }
}

# Standard dataflow profile for batch processing
resource "azurerm_iotoperations_dataflow_profile" "standard" {
  name                = var.standard_profile_name
  resource_group_name = azurerm_resource_group.example.name
  instance_name       = azurerm_iotoperations_instance.example.name

  extended_location {
    name = azurerm_iotoperations_instance.example.extended_location_name
    type = "CustomLocation"
  }

  instance_count = var.standard_instance_count

  diagnostics {
    logs {
      level = var.standard_log_level
    }
    metrics {
      prometheus_port = var.standard_prometheus_port
    }
  }
}

# Low-resource dataflow profile for edge scenarios
resource "azurerm_iotoperations_dataflow_profile" "edge" {
  name                = var.edge_profile_name
  resource_group_name = azurerm_resource_group.example.name
  instance_name       = azurerm_iotoperations_instance.example.name

  extended_location {
    name = azurerm_iotoperations_instance.example.extended_location_name
    type = "CustomLocation"
  }

  instance_count = var.edge_instance_count

  diagnostics {
    logs {
      level = var.edge_log_level
    }
    metrics {
      prometheus_port = var.edge_prometheus_port
    }
  }
}

# Development/testing dataflow profile (optional, gated by count)
resource "azurerm_iotoperations_dataflow_profile" "development" {
  count = var.create_development_profile ? 1 : 0

  name                = var.development_profile_name
  resource_group_name = azurerm_resource_group.example.name
  instance_name       = azurerm_iotoperations_instance.example.name

  extended_location {
    name = azurerm_iotoperations_instance.example.extended_location_name
    type = "CustomLocation"
  }

  instance_count = var.development_instance_count

  diagnostics {
    logs {
      level = var.development_log_level
    }
    metrics {
      prometheus_port = var.development_prometheus_port
    }
  }
}

# --- outputs.tf (head) ---

output "resource_group_id" {
  description = "The ID of the resource group"
  value       = azurerm_resource_group.example.id
}

output "instance_id" {
  description = "The ID of the IoT Operations instance"
  value       = azurerm_iotoperations_instance.example.id
}

# High Performance Profile Outputs
output "high_performance_profile_id" {
  description = "The ID of the high-performance dataflow profile"
  value       = azurerm_iotoperations_dataflow_profile.high_performance.id
}

output
"high_performance_profile_name" { + description = "The name of the high-performance dataflow profile" + value = azurerm_iotoperations_dataflow_profile.high_performance.name +} + +# Standard Profile Outputs +output "standard_profile_id" { + description = "The ID of the standard dataflow profile" + value = azurerm_iotoperations_dataflow_profile.standard.id +} + +output "standard_profile_name" { + description = "The name of the standard dataflow profile" + value = azurerm_iotoperations_dataflow_profile.standard.name +} + +# Edge Profile Outputs +output "edge_profile_id" { + description = "The ID of the edge dataflow profile" + value = azurerm_iotoperations_dataflow_profile.edge.id +} + +output "edge_profile_name" { + description = "The name of the edge dataflow profile" + value = azurerm_iotoperations_dataflow_profile.edge.name +} + +# Development Profile Outputs +output "development_profile_id" { + description = "The ID of the development dataflow profile" + value = var.create_development_profile ? azurerm_iotoperations_dataflow_profile.development[0].id : null +} + +output "development_profile_name" { + description = "The name of the development dataflow profile" + value = var.create_development_profile ? 
azurerm_iotoperations_dataflow_profile.development[0].name : null +} + +# Profile Configuration Summary +output "profiles_summary" { + description = "Summary of all configured dataflow profiles" + value = { + high_performance = { + id = azurerm_iotoperations_dataflow_profile.high_performance.id + name = azurerm_iotoperations_dataflow_profile.high_performance.name + instance_count = azurerm_iotoperations_dataflow_profile.high_performance.instance_count + log_level = var.high_performance_log_level + prometheus_port = var.high_performance_prometheus_port + } + standard = { + id = azurerm_iotoperations_dataflow_profile.standard.id + name = azurerm_iotoperations_dataflow_profile.standard.name + instance_count = azurerm_iotoperations_dataflow_profile.standard.instance_count + log_level = var.standard_log_level + prometheus_port = var.standard_prometheus_port + } + edge = { + id = azurerm_iotoperations_dataflow_profile.edge.id + name = azurerm_iotoperations_dataflow_profile.edge.name + instance_count = azurerm_iotoperations_dataflow_profile.edge.instance_count + log_level = var.edge_log_level + prometheus_port = var.edge_prometheus_port + } + development = var.create_development_profile ? { + id = azurerm_iotoperations_dataflow_profile.development[0].id + name = azurerm_iotoperations_dataflow_profile.development[0].name + instance_count = azurerm_iotoperations_dataflow_profile.development[0].instance_count + log_level = var.development_log_level + prometheus_port = var.development_prometheus_port + } : null + } +} + +# Metrics Endpoints Summary +output "metrics_endpoints" { + description = "Prometheus metrics endpoints for all profiles" + value = { + high_performance = "http://localhost:${var.high_performance_prometheus_port}/metrics" + standard = "http://localhost:${var.standard_prometheus_port}/metrics" + edge = "http://localhost:${var.edge_prometheus_port}/metrics" + development = var.create_development_profile ? 
"http://localhost:${var.development_prometheus_port}/metrics" : null + } +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow_profile/terraform.tfvars.example b/examples/iot/iotoperations_dataflow_profile/terraform.tfvars.example new file mode 100644 index 000000000000..08cec939c1b5 --- /dev/null +++ b/examples/iot/iotoperations_dataflow_profile/terraform.tfvars.example @@ -0,0 +1,52 @@ +# Example values for the IoT Operations Dataflow Profiles deployment +# Copy this file to terraform.tfvars and modify the values as needed + +resource_group_name = "rg-iotoperations-dataflow-profiles" +location = "East US 2" +instance_name = "my-iotops-instance" + +# High Performance Profile - For real-time, high-throughput scenarios +high_performance_profile_name = "realtime-processing" +high_performance_instance_count = 4 +high_performance_log_level = "warn" +high_performance_prometheus_port = 9090 +high_performance_self_check_mode = "Enabled" +high_performance_self_check_interval = 30 +high_performance_self_check_timeout = 15 + +# Standard Profile - For regular batch processing workloads +standard_profile_name = "batch-processing" +standard_instance_count = 2 +standard_log_level = "info" +standard_prometheus_port = 9091 +standard_self_check_mode = "Enabled" +standard_self_check_interval = 60 +standard_self_check_timeout = 30 + +# Edge Profile - For resource-constrained edge environments +edge_profile_name = "edge-processing" +edge_instance_count = 1 +edge_log_level = "error" +edge_prometheus_port = 9092 +edge_self_check_mode = "Enabled" +edge_self_check_interval = 120 +edge_self_check_timeout = 60 + +# Development Profile - For testing and development (optional) +create_development_profile = true +development_profile_name = "dev-testing" +development_instance_count = 1 +development_log_level = "debug" +development_prometheus_port = 9093 +development_self_check_mode = "Enabled" +development_self_check_interval = 30 +development_self_check_timeout = 15 + 
+# Tags +tags = { + Environment = "Production" + Project = "IoT Operations" + Owner = "Data Engineering Team" + Purpose = "Dataflow Processing Profiles" + CostCenter = "Engineering" +} \ No newline at end of file diff --git a/examples/iot/iotoperations_dataflow_profile/variables.tf b/examples/iot/iotoperations_dataflow_profile/variables.tf new file mode 100644 index 000000000000..65e1e2421443 --- /dev/null +++ b/examples/iot/iotoperations_dataflow_profile/variables.tf @@ -0,0 +1,259 @@ +variable "resource_group_name" { + description = "The name of the resource group" + type = string + default = "rg-iotoperations-dataflow-profiles" +} + +variable "location" { + description = "The Azure region where resources will be created" + type = string + default = "East US 2" +} + +variable "instance_name" { + description = "The name of the IoT Operations instance" + type = string + default = "iotops-instance-profiles" +} + +variable "custom_location_id" { + description = "The resource ID of the custom location (Arc-enabled Kubernetes cluster)" + type = string +} + +variable "tags" { + description = "A mapping of tags to assign to the resources" + type = map(string) + default = {} +} + +# High Performance Profile Variables +variable "high_performance_profile_name" { + description = "The name of the high-performance dataflow profile" + type = string + default = "high-performance-profile" +} + +variable "high_performance_instance_count" { + description = "Number of instances for high-performance profile" + type = number + default = 4 + validation { + condition = var.high_performance_instance_count >= 1 && var.high_performance_instance_count <= 10 + error_message = "Instance count must be between 1 and 10." 
+ } +} + +variable "high_performance_log_level" { + description = "Log level for high-performance profile" + type = string + default = "warn" + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.high_performance_log_level) + error_message = "Log level must be one of: trace, debug, info, warn, error." + } +} + +variable "high_performance_prometheus_port" { + description = "Prometheus metrics port for high-performance profile" + type = number + default = 9090 +} + +variable "high_performance_self_check_mode" { + description = "Self-check mode for high-performance profile" + type = string + default = "Enabled" + validation { + condition = contains(["Enabled", "Disabled"], var.high_performance_self_check_mode) + error_message = "Self-check mode must be either 'Enabled' or 'Disabled'." + } +} + +variable "high_performance_self_check_interval" { + description = "Self-check interval in seconds for high-performance profile" + type = number + default = 30 +} + +variable "high_performance_self_check_timeout" { + description = "Self-check timeout in seconds for high-performance profile" + type = number + default = 15 +} + +# Standard Profile Variables +variable "standard_profile_name" { + description = "The name of the standard dataflow profile" + type = string + default = "standard-profile" +} + +variable "standard_instance_count" { + description = "Number of instances for standard profile" + type = number + default = 2 + validation { + condition = var.standard_instance_count >= 1 && var.standard_instance_count <= 10 + error_message = "Instance count must be between 1 and 10." + } +} + +variable "standard_log_level" { + description = "Log level for standard profile" + type = string + default = "info" + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.standard_log_level) + error_message = "Log level must be one of: trace, debug, info, warn, error." 
+ } +} + +variable "standard_prometheus_port" { + description = "Prometheus metrics port for standard profile" + type = number + default = 9091 +} + +variable "standard_self_check_mode" { + description = "Self-check mode for standard profile" + type = string + default = "Enabled" + validation { + condition = contains(["Enabled", "Disabled"], var.standard_self_check_mode) + error_message = "Self-check mode must be either 'Enabled' or 'Disabled'." + } +} + +variable "standard_self_check_interval" { + description = "Self-check interval in seconds for standard profile" + type = number + default = 60 +} + +variable "standard_self_check_timeout" { + description = "Self-check timeout in seconds for standard profile" + type = number + default = 30 +} + +# Edge Profile Variables +variable "edge_profile_name" { + description = "The name of the edge dataflow profile" + type = string + default = "edge-profile" +} + +variable "edge_instance_count" { + description = "Number of instances for edge profile" + type = number + default = 1 + validation { + condition = var.edge_instance_count >= 1 && var.edge_instance_count <= 10 + error_message = "Instance count must be between 1 and 10." + } +} + +variable "edge_log_level" { + description = "Log level for edge profile" + type = string + default = "error" + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.edge_log_level) + error_message = "Log level must be one of: trace, debug, info, warn, error." + } +} + +variable "edge_prometheus_port" { + description = "Prometheus metrics port for edge profile" + type = number + default = 9092 +} + +variable "edge_self_check_mode" { + description = "Self-check mode for edge profile" + type = string + default = "Enabled" + validation { + condition = contains(["Enabled", "Disabled"], var.edge_self_check_mode) + error_message = "Self-check mode must be either 'Enabled' or 'Disabled'." 
+ } +} + +variable "edge_self_check_interval" { + description = "Self-check interval in seconds for edge profile" + type = number + default = 120 +} + +variable "edge_self_check_timeout" { + description = "Self-check timeout in seconds for edge profile" + type = number + default = 60 +} + +# Development Profile Variables +variable "create_development_profile" { + description = "Whether to create a development profile" + type = bool + default = false +} + +variable "development_profile_name" { + description = "The name of the development dataflow profile" + type = string + default = "development-profile" +} + +variable "development_instance_count" { + description = "Number of instances for development profile" + type = number + default = 1 + validation { + condition = var.development_instance_count >= 1 && var.development_instance_count <= 10 + error_message = "Instance count must be between 1 and 10." + } +} + +variable "development_log_level" { + description = "Log level for development profile" + type = string + default = "debug" + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.development_log_level) + error_message = "Log level must be one of: trace, debug, info, warn, error." + } +} + +variable "development_prometheus_port" { + description = "Prometheus metrics port for development profile" + type = number + default = 9093 +} + +variable "development_self_check_mode" { + description = "Self-check mode for development profile" + type = string + default = "Enabled" + validation { + condition = contains(["Enabled", "Disabled"], var.development_self_check_mode) + error_message = "Self-check mode must be either 'Enabled' or 'Disabled'." 
+ } +} + +variable "development_self_check_interval" { + description = "Self-check interval in seconds for development profile" + type = number + default = 30 +} + +variable "development_self_check_timeout" { + description = "Self-check timeout in seconds for development profile" + type = number + default = 15 +} + +variable "schema_registry_ref" { + description = "The resource ID of the schema registry to use." + type = string +} diff --git a/examples/iot/iotoperations_instance/README.md b/examples/iot/iotoperations_instance/README.md new file mode 100644 index 000000000000..8df2ab011bdc --- /dev/null +++ b/examples/iot/iotoperations_instance/README.md @@ -0,0 +1,85 @@ +# IoT Operations Instance + +This example shows how to create an Azure IoT Operations instance using Terraform. + +## Prerequisites + +Before running this example, you need: + +1. **Azure CLI** installed and authenticated +2. **Terraform** 1.6 or later +3. **Existing Resource Group** in Azure +4. **Arc-enabled Kubernetes cluster** with a Custom Location +5. 
**Schema Registry** in Azure Device Registry + +## Usage + +### Step 1: Set Variables + +Create a `terraform.tfvars` file: + +```hcl +# Prefix for resource naming +prefix = "mycompany" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Required Resource IDs (replace with your actual values) +custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ExtendedLocation/customLocations/example-location" +schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.DeviceRegistry/schemaRegistries/example-registry" +``` + +### Step 2: Find Required Resources + +Find available Custom Locations: +```bash +az resource list --resource-type "Microsoft.ExtendedLocation/customLocations" --query "[].{Name:name, Id:id}" -o table +``` + +Find available Schema Registries: +```bash +az resource list --resource-type "Microsoft.DeviceRegistry/schemaRegistries" --query "[].{Name:name, Id:id}" -o table +``` + +### Step 3: Deploy + +```bash +terraform init +terraform plan +terraform apply +``` + +## Variables + +| Name | Description | Type | Required | +|------|-------------|------|----------| +| `prefix` | Prefix for resource naming | `string` | yes | +| `resource_group_name` | Name of existing resource group | `string` | yes | +| `custom_location_id` | ARM ID of Custom Location | `string` | yes | +| `schema_registry_ref` | ARM ID of Schema Registry | `string` | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| `iotoperations_instance_id` | ARM resource ID of the IoT Operations instance | + +## Architecture + +This example creates: + +- **IoT Operations Instance** (named `{prefix}-iotoperations`) via the `azurerm_iotoperations_instance` resource + +The IoT Operations instance requires: +- An existing Resource Group +- An Arc-enabled Kubernetes cluster (Custom Location) +- A Schema Registry for data schemas + +## Cleanup + +```bash
+terraform destroy +``` + +Note: This will only destroy the IoT Operations instance. The resource group, Custom Location, and Schema Registry will remain. \ No newline at end of file diff --git a/examples/iot/iotoperations_instance/main.tf b/examples/iot/iotoperations_instance/main.tf new file mode 100644 index 000000000000..b28b9acbcbd3 --- /dev/null +++ b/examples/iot/iotoperations_instance/main.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 1.6" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + } +} + +provider "azurerm" { + features {} +} + +# Use existing resource group +data "azurerm_resource_group" "example" { + name = var.resource_group_name +} + +# IoT Operations instance +resource "azurerm_iotoperations_instance" "example" { + name = "${var.prefix}-iotoperations" + resource_group_name = data.azurerm_resource_group.example.name + location = data.azurerm_resource_group.example.location + + # Extended location (Custom Location for Arc-enabled Kubernetes) + extended_location_name = var.custom_location_id + extended_location_type = "CustomLocation" + + # Required schema registry reference + schema_registry_ref = var.schema_registry_ref + + # Optional properties + description = "IoT Operations instance created via Terraform" +} diff --git a/examples/iot/iotoperations_instance/outputs.tf b/examples/iot/iotoperations_instance/outputs.tf new file mode 100644 index 000000000000..34551f44be80 --- /dev/null +++ b/examples/iot/iotoperations_instance/outputs.tf @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +output "iotoperations_instance_id" { + description = "The ARM resource ID of the IoT Operations instance" + value = azurerm_iotoperations_instance.example.id +} \ No newline at end of file diff --git a/examples/iot/iotoperations_instance/terraform.tfvars.example b/examples/iot/iotoperations_instance/terraform.tfvars.example new file mode 100644 index 000000000000..8b44fc04d243 --- /dev/null +++ b/examples/iot/iotoperations_instance/terraform.tfvars.example @@ -0,0 +1,14 @@ +# Example terraform.tfvars file +# Copy this to terraform.tfvars and update with your values + +# Prefix for resource naming +prefix = "example" + +# Existing Resource Group +resource_group_name = "existing-resource-group-name" + +# Custom Location (Arc-enabled Kubernetes cluster) +custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ExtendedLocation/customLocations/example-location" + +# Device Registry Schema Registry +schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.DeviceRegistry/schemaRegistries/example-registry" \ No newline at end of file diff --git a/examples/iot/iotoperations_instance/variables.tf b/examples/iot/iotoperations_instance/variables.tf new file mode 100644 index 000000000000..29bde367102d --- /dev/null +++ b/examples/iot/iotoperations_instance/variables.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +variable "prefix" { + description = "The prefix used for all resources in this example" + type = string +} + +variable "resource_group_name" { + description = "The name of an existing resource group where resources will be created" + type = string +} + +variable "custom_location_id" { + description = "The ARM resource ID of the Custom Location (Arc-enabled Kubernetes cluster)" + type = string +} + +variable "schema_registry_ref" { + description = "The ARM resource ID of the Device Registry Schema Registry" + type = string +} \ No newline at end of file diff --git a/internal/clients/client.go b/internal/clients/client.go index 96ff6ab3c1c9..bd88d2d32bad 100644 --- a/internal/clients/client.go +++ b/internal/clients/client.go @@ -83,6 +83,7 @@ import ( hybridcompute "github.com/hashicorp/terraform-provider-azurerm/internal/services/hybridcompute/client" iotcentral "github.com/hashicorp/terraform-provider-azurerm/internal/services/iotcentral/client" iothub "github.com/hashicorp/terraform-provider-azurerm/internal/services/iothub/client" + iotoperations "github.com/hashicorp/terraform-provider-azurerm/internal/services/iotoperations/clients" keyvault "github.com/hashicorp/terraform-provider-azurerm/internal/services/keyvault/client" kusto "github.com/hashicorp/terraform-provider-azurerm/internal/services/kusto/client" lighthouse "github.com/hashicorp/terraform-provider-azurerm/internal/services/lighthouse/client" @@ -286,6 +287,7 @@ type Client struct { VoiceServices *voiceServices.Client Web *web.Client Workloads *workloads_v2024_09_01.Client + IoTOperations *iotoperations.Client } // NOTE: it should be possible for this method to become Private once the top level Client's removed @@ -680,6 +682,9 @@ func (client *Client) Build(ctx context.Context, o *common.ClientOptions) error if client.Workloads, err = workloads.NewClient(o); err != nil { return fmt.Errorf("building clients for Workloads: %+v", err) } + if 
client.IoTOperations, err = iotoperations.NewClient(o); err != nil { + return fmt.Errorf("building clients for IoTOperations: %+v", err) + } return nil } diff --git a/internal/provider/services.go b/internal/provider/services.go index 142d47c2f0e5..a376f7b6ba39 100644 --- a/internal/provider/services.go +++ b/internal/provider/services.go @@ -66,6 +66,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/services/hybridcompute" "github.com/hashicorp/terraform-provider-azurerm/internal/services/iotcentral" "github.com/hashicorp/terraform-provider-azurerm/internal/services/iothub" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/iotoperations" "github.com/hashicorp/terraform-provider-azurerm/internal/services/keyvault" "github.com/hashicorp/terraform-provider-azurerm/internal/services/kusto" "github.com/hashicorp/terraform-provider-azurerm/internal/services/legacy" @@ -183,6 +184,7 @@ func SupportedTypedServices() []sdk.TypedServiceRegistration { hybridcompute.Registration{}, iotcentral.Registration{}, iothub.Registration{}, + iotoperations.Registration{}, keyvault.Registration{}, kusto.Registration{}, loadbalancer.Registration{}, diff --git a/internal/services/iotoperations/clients/client.go b/internal/services/iotoperations/clients/client.go new file mode 100644 index 000000000000..a07ae357d8f2 --- /dev/null +++ b/internal/services/iotoperations/clients/client.go @@ -0,0 +1,87 @@ +package client + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow" + 
"github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance" + "github.com/hashicorp/terraform-provider-azurerm/internal/common" +) + +type Client struct { + BrokerAuthenticationClient *brokerauthentication.BrokerAuthenticationClient + BrokerAuthorizationClient *brokerauthorization.BrokerAuthorizationClient + BrokerClient *broker.BrokerClient + BrokerListenerClient *brokerlistener.BrokerListenerClient + DataflowClient *dataflow.DataflowClient + DataflowEndpointClient *dataflowendpoint.DataflowEndpointClient + DataflowProfileClient *dataflowprofile.DataflowProfileClient + InstanceClient *instance.InstanceClient +} + +func NewClient(o *common.ClientOptions) (*Client, error) { + brokerAuthenticationClient, err := brokerauthentication.NewBrokerAuthenticationClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building BrokerAuthentication client: %+v", err) + } + o.Configure(brokerAuthenticationClient.Client, o.Authorizers.ResourceManager) + + brokerAuthorizationClient, err := brokerauthorization.NewBrokerAuthorizationClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building BrokerAuthorization client: %+v", err) + } + o.Configure(brokerAuthorizationClient.Client, o.Authorizers.ResourceManager) + + brokerClient, err := broker.NewBrokerClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building Broker client: %+v", err) + } + o.Configure(brokerClient.Client, o.Authorizers.ResourceManager) + + brokerListenerClient, err := brokerlistener.NewBrokerListenerClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building BrokerListener client: %+v", err) + } + o.Configure(brokerListenerClient.Client, 
o.Authorizers.ResourceManager) + + dataflowClient, err := dataflow.NewDataflowClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building Dataflow client: %+v", err) + } + o.Configure(dataflowClient.Client, o.Authorizers.ResourceManager) + + dataflowEndpointClient, err := dataflowendpoint.NewDataflowEndpointClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building DataflowEndpoint client: %+v", err) + } + o.Configure(dataflowEndpointClient.Client, o.Authorizers.ResourceManager) + + dataflowProfileClient, err := dataflowprofile.NewDataflowProfileClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building DataflowProfile client: %+v", err) + } + o.Configure(dataflowProfileClient.Client, o.Authorizers.ResourceManager) + + instanceClient, err := instance.NewInstanceClientWithBaseURI(o.Environment.ResourceManager) + if err != nil { + return nil, fmt.Errorf("building Instance client: %+v", err) + } + o.Configure(instanceClient.Client, o.Authorizers.ResourceManager) + + return &Client{ + BrokerAuthenticationClient: brokerAuthenticationClient, + BrokerAuthorizationClient: brokerAuthorizationClient, + BrokerClient: brokerClient, + BrokerListenerClient: brokerListenerClient, + DataflowClient: dataflowClient, + DataflowEndpointClient: dataflowEndpointClient, + DataflowProfileClient: dataflowProfileClient, + InstanceClient: instanceClient, + }, nil +} diff --git a/internal/services/iotoperations/iotoperations_broker_listener_resource.go b/internal/services/iotoperations/iotoperations_broker_listener_resource.go new file mode 100644 index 000000000000..f61429211602 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_broker_listener_resource.go @@ -0,0 +1,716 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type BrokerListenerResource struct{} + +var _ sdk.ResourceWithUpdate = BrokerListenerResource{} + +type BrokerListenerModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + InstanceName string `tfschema:"instance_name"` + BrokerName string `tfschema:"broker_name"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + ServiceName *string `tfschema:"service_name"` + ServiceType *string `tfschema:"service_type"` + Ports []BrokerListenerPortModel `tfschema:"ports"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type BrokerListenerPortModel struct { + Port int `tfschema:"port"` + NodePort *int `tfschema:"node_port"` + Protocol *string `tfschema:"protocol"` + AuthenticationRef *string `tfschema:"authentication_ref"` + AuthorizationRef *string `tfschema:"authorization_ref"` + Tls *BrokerListenerTlsModel `tfschema:"tls"` +} + +type BrokerListenerTlsModel struct { + Mode string `tfschema:"mode"` + CertManagerCertificateSpec *BrokerListenerCertManagerCertificateSpecModel `tfschema:"cert_manager_certificate_spec"` + Manual *BrokerListenerManualModel `tfschema:"manual"` +} + +type BrokerListenerCertManagerCertificateSpecModel struct { + Duration *string `tfschema:"duration"` + SecretName *string `tfschema:"secret_name"` + RenewBefore *string `tfschema:"renew_before"` + IssuerRef BrokerListenerIssuerRefModel `tfschema:"issuer_ref"` // Required field + PrivateKey *BrokerListenerPrivateKeyModel `tfschema:"private_key"` + San *BrokerListenerSanModel `tfschema:"san"` +} + +type BrokerListenerIssuerRefModel struct { + Group string `tfschema:"group"` // Required + Kind string `tfschema:"kind"` // Required + Name string 
`tfschema:"name"` // Required +} + +type BrokerListenerPrivateKeyModel struct { + Algorithm string `tfschema:"algorithm"` // Required + RotationPolicy string `tfschema:"rotation_policy"` // Required +} + +type BrokerListenerSanModel struct { + Dns []string `tfschema:"dns"` // Required + Ip []string `tfschema:"ip"` // Required +} + +type BrokerListenerManualModel struct { + SecretRef string `tfschema:"secret_ref"` // Required +} + +func (r BrokerListenerResource) ModelObject() interface{} { + return &BrokerListenerModel{} +} + +func (r BrokerListenerResource) ResourceType() string { + return "azurerm_iotoperations_broker_listener" +} + +func (r BrokerListenerResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return brokerlistener.ValidateListenerID +} + +func (r BrokerListenerResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "broker_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "extended_location_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + 
ValidateFunc: validation.StringIsNotEmpty, + }, + "service_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 63), + }, + "service_type": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "ClusterIp", + ValidateFunc: validation.StringInSlice([]string{ + "LoadBalancer", + "NodePort", + "ClusterIp", // Corrected from "ClusterIP" + }, false), + }, + "ports": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "port": { + Type: pluginsdk.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 65535), + }, + "node_port": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(30000, 32767), + }, + "protocol": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "Mqtt", + ValidateFunc: validation.StringInSlice([]string{ + "Mqtt", + "WebSockets", + }, false), + }, + "authentication_ref": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "authorization_ref": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "tls": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Automatic", // Only supported modes + "Manual", + }, false), + }, + "cert_manager_certificate_spec": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "duration": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 50), + }, + "secret_name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "renew_before": { + Type: 
pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 50), + }, + "issuer_ref": { + Type: pluginsdk.TypeList, + Required: true, // Changed from Optional + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "group": { + Type: pluginsdk.TypeString, + Required: true, // Changed from Optional + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "kind": { + Type: pluginsdk.TypeString, + Required: true, // Changed from Optional + ValidateFunc: validation.StringInSlice([]string{ + "ClusterIssuer", + "Issuer", + }, false), + }, + "name": { + Type: pluginsdk.TypeString, + Required: true, // Changed from Optional + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "private_key": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "algorithm": { + Type: pluginsdk.TypeString, + Required: true, // Changed from Optional + ValidateFunc: validation.StringInSlice([]string{ + "Rsa2048", + "Rsa4096", + "Rsa8192", + "Ec256", + "Ec384", + "Ec521", + "Ed25519", + }, false), + }, + "rotation_policy": { + Type: pluginsdk.TypeString, + Required: true, // Changed from Optional + ValidateFunc: validation.StringInSlice([]string{ + "Always", + "Never", + }, false), + }, + }, + }, + }, + "san": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "dns": { + Type: pluginsdk.TypeList, + Required: true, // Changed from Optional + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + "ip": { + Type: pluginsdk.TypeList, + Required: true, // Changed from Optional + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsIPAddress, + }, + }, + }, + }, + }, + }, + }, + }, + "manual": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: 
&pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "secret_ref": { + Type: pluginsdk.TypeString, + Required: true, // Changed from Optional + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r BrokerListenerResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func (r BrokerListenerResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerListenerClient + + var model BrokerListenerModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := brokerlistener.NewListenerID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.BrokerName, model.Name) + + // Check if resource already exists + existing, err := client.Get(ctx, id) + if err == nil && existing.Model != nil { + return fmt.Errorf("IoT Operations Broker Listener %q already exists", id.ListenerName) + } + + // Build payload with required ExtendedLocation + payload := brokerlistener.BrokerListenerResource{ + ExtendedLocation: brokerlistener.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: brokerlistener.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandBrokerListenerProperties(model), + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r BrokerListenerResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := 
metadata.Client.IoTOperations.BrokerListenerClient + + id, err := brokerlistener.ParseListenerID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := BrokerListenerModel{ + Name: id.ListenerName, + ResourceGroupName: id.ResourceGroupName, + InstanceName: id.InstanceName, + BrokerName: id.BrokerName, + } + + if respModel := resp.Model; respModel != nil { + model.ExtendedLocationName = &respModel.ExtendedLocation.Name + extendedLocationType := string(respModel.ExtendedLocation.Type) + model.ExtendedLocationType = &extendedLocationType + + if respModel.Properties != nil { + flattenBrokerListenerProperties(respModel.Properties, &model) + + if respModel.Properties.ProvisioningState != nil { + provisioningState := string(*respModel.Properties.ProvisioningState) + model.ProvisioningState = &provisioningState + } + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r BrokerListenerResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerListenerClient + + id, err := brokerlistener.ParseListenerID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var model BrokerListenerModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // Since there's no separate Update method, use CreateOrUpdate + payload := brokerlistener.BrokerListenerResource{ + ExtendedLocation: brokerlistener.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: brokerlistener.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandBrokerListenerProperties(model), + } + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } 
+} + +func (r BrokerListenerResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerListenerClient + + id, err := brokerlistener.ParseListenerID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +// Helper functions for expand/flatten operations +func expandBrokerListenerProperties(model BrokerListenerModel) *brokerlistener.BrokerListenerProperties { + props := &brokerlistener.BrokerListenerProperties{ + Ports: expandBrokerListenerPorts(model.Ports), + } + + if model.ServiceName != nil { + props.ServiceName = model.ServiceName + } + + if model.ServiceType != nil { + serviceType := brokerlistener.ServiceType(*model.ServiceType) + props.ServiceType = &serviceType + } + + return props +} + +func expandBrokerListenerPorts(ports []BrokerListenerPortModel) []brokerlistener.ListenerPort { + result := make([]brokerlistener.ListenerPort, 0, len(ports)) + + for _, port := range ports { + listenerPort := brokerlistener.ListenerPort{ + Port: int64(port.Port), + } + + if port.NodePort != nil { + listenerPort.NodePort = func(i int) *int64 { v := int64(i); return &v }(*port.NodePort) + } + + if port.Protocol != nil { + protocol := brokerlistener.BrokerProtocolType(*port.Protocol) + listenerPort.Protocol = &protocol + } + + if port.AuthenticationRef != nil { + listenerPort.AuthenticationRef = port.AuthenticationRef + } + + if port.AuthorizationRef != nil { + listenerPort.AuthorizationRef = port.AuthorizationRef + } + + if port.Tls != nil { + listenerPort.Tls = expandBrokerListenerTls(*port.Tls) + } + + result = append(result, listenerPort) + } + + return result +} + +func expandBrokerListenerTls(tls BrokerListenerTlsModel) *brokerlistener.TlsCertMethod { + tlsMode := 
brokerlistener.TlsCertMethodMode(tls.Mode) + result := &brokerlistener.TlsCertMethod{ + Mode: tlsMode, + } + + if tls.CertManagerCertificateSpec != nil { + result.CertManagerCertificateSpec = expandBrokerListenerCertManagerSpec(*tls.CertManagerCertificateSpec) + } + + if tls.Manual != nil { + result.Manual = expandBrokerListenerManual(*tls.Manual) + } + + return result +} + +func expandBrokerListenerCertManagerSpec(spec BrokerListenerCertManagerCertificateSpecModel) *brokerlistener.CertManagerCertificateSpec { + result := &brokerlistener.CertManagerCertificateSpec{ + IssuerRef: expandBrokerListenerIssuerRef(spec.IssuerRef), // Required field + } + + if spec.Duration != nil { + result.Duration = spec.Duration + } + + if spec.SecretName != nil { + result.SecretName = spec.SecretName + } + + if spec.RenewBefore != nil { + result.RenewBefore = spec.RenewBefore + } + + if spec.PrivateKey != nil { + result.PrivateKey = expandBrokerListenerPrivateKey(*spec.PrivateKey) + } + + if spec.San != nil { + result.San = expandBrokerListenerSan(*spec.San) + } + + return result +} + +func expandBrokerListenerIssuerRef(issuerRef BrokerListenerIssuerRefModel) brokerlistener.CertManagerIssuerRef { + return brokerlistener.CertManagerIssuerRef{ + Group: issuerRef.Group, + Kind: brokerlistener.CertManagerIssuerKind(issuerRef.Kind), + Name: issuerRef.Name, + } +} + +func expandBrokerListenerPrivateKey(privateKey BrokerListenerPrivateKeyModel) *brokerlistener.CertManagerPrivateKey { + return &brokerlistener.CertManagerPrivateKey{ + Algorithm: brokerlistener.PrivateKeyAlgorithm(privateKey.Algorithm), + RotationPolicy: brokerlistener.PrivateKeyRotationPolicy(privateKey.RotationPolicy), + } +} + +func expandBrokerListenerSan(san BrokerListenerSanModel) *brokerlistener.SanForCert { + return &brokerlistener.SanForCert{ + Dns: san.Dns, + IP: san.Ip, + } +} + +func expandBrokerListenerManual(manual BrokerListenerManualModel) *brokerlistener.X509ManualCertificate { + return 
&brokerlistener.X509ManualCertificate{ + SecretRef: manual.SecretRef, + } +} + +func flattenBrokerListenerProperties(props *brokerlistener.BrokerListenerProperties, model *BrokerListenerModel) { + if props == nil { + return + } + + if props.ServiceName != nil { + model.ServiceName = props.ServiceName + } + + if props.ServiceType != nil { + serviceType := string(*props.ServiceType) + model.ServiceType = &serviceType + } + + model.Ports = flattenBrokerListenerPorts(props.Ports) +} + +func flattenBrokerListenerPorts(ports []brokerlistener.ListenerPort) []BrokerListenerPortModel { + result := make([]BrokerListenerPortModel, 0, len(ports)) + + for _, port := range ports { + portModel := BrokerListenerPortModel{ + Port: int(port.Port), + } + + if port.NodePort != nil { + nodePort := int(*port.NodePort) + portModel.NodePort = &nodePort + } + + if port.Protocol != nil { + protocol := string(*port.Protocol) + portModel.Protocol = &protocol + } + + if port.AuthenticationRef != nil { + portModel.AuthenticationRef = port.AuthenticationRef + } + + if port.AuthorizationRef != nil { + portModel.AuthorizationRef = port.AuthorizationRef + } + + if port.Tls != nil { + portModel.Tls = flattenBrokerListenerTls(*port.Tls) + } + + result = append(result, portModel) + } + + return result +} + +func flattenBrokerListenerTls(tls brokerlistener.TlsCertMethod) *BrokerListenerTlsModel { + result := &BrokerListenerTlsModel{ + Mode: string(tls.Mode), + } + + if tls.CertManagerCertificateSpec != nil { + result.CertManagerCertificateSpec = flattenBrokerListenerCertManagerSpec(*tls.CertManagerCertificateSpec) + } + + if tls.Manual != nil { + result.Manual = flattenBrokerListenerManual(*tls.Manual) + } + + return result +} + +func flattenBrokerListenerCertManagerSpec(spec brokerlistener.CertManagerCertificateSpec) *BrokerListenerCertManagerCertificateSpecModel { + result := &BrokerListenerCertManagerCertificateSpecModel{ + IssuerRef: flattenBrokerListenerIssuerRef(spec.IssuerRef), // Required field 
+ } + + if spec.Duration != nil { + result.Duration = spec.Duration + } + + if spec.SecretName != nil { + result.SecretName = spec.SecretName + } + + if spec.RenewBefore != nil { + result.RenewBefore = spec.RenewBefore + } + + if spec.PrivateKey != nil { + result.PrivateKey = flattenBrokerListenerPrivateKey(*spec.PrivateKey) + } + + if spec.San != nil { + result.San = flattenBrokerListenerSan(*spec.San) + } + + return result +} + +func flattenBrokerListenerIssuerRef(issuerRef brokerlistener.CertManagerIssuerRef) BrokerListenerIssuerRefModel { + return BrokerListenerIssuerRefModel{ + Group: issuerRef.Group, + Kind: string(issuerRef.Kind), + Name: issuerRef.Name, + } +} + +func flattenBrokerListenerPrivateKey(privateKey brokerlistener.CertManagerPrivateKey) *BrokerListenerPrivateKeyModel { + return &BrokerListenerPrivateKeyModel{ + Algorithm: string(privateKey.Algorithm), + RotationPolicy: string(privateKey.RotationPolicy), + } +} + +func flattenBrokerListenerSan(san brokerlistener.SanForCert) *BrokerListenerSanModel { + return &BrokerListenerSanModel{ + Dns: san.Dns, + Ip: san.IP, + } +} + +func flattenBrokerListenerManual(manual brokerlistener.X509ManualCertificate) *BrokerListenerManualModel { + return &BrokerListenerManualModel{ + SecretRef: manual.SecretRef, + } +} diff --git a/internal/services/iotoperations/iotoperations_broker_listener_resource_test.go b/internal/services/iotoperations/iotoperations_broker_listener_resource_test.go new file mode 100644 index 000000000000..a925ae3947a8 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_broker_listener_resource_test.go @@ -0,0 +1,233 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +// IotOperationsBrokerListenerResource is a test harness for azurerm_iotoperations_broker_listener acceptance tests. +type IotOperationsBrokerListenerResource struct{} + +func TestAccIotOperationsBrokerListener_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_listener", "test") + r := IotOperationsBrokerListenerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBrokerListener_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_listener", "test") + r := IotOperationsBrokerListenerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_iotoperations_broker_listener"), + }, + }) +} + +func TestAccIotOperationsBrokerListener_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_listener", "test") + r := IotOperationsBrokerListenerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBrokerListener_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_listener", "test") + r := IotOperationsBrokerListenerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (IotOperationsBrokerListenerResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + // Parse the ID to get the structured ID object, not individual strings + id, err := brokerlistener.ParseListenerID(state.ID) + if err != nil { + return nil, fmt.Errorf("parsing %s: %+v", state.ID, err) + } + + // Use the parsed ID object in the Get call + resp, err := clients.IoTOperations.BrokerListenerClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %+v", state.ID, err) + } + + return utils.Bool(resp.Model != nil), nil +} + +// template builds the minimal provider + resource_group; NOTE: you must create an IoT Operations instance and broker +func (IotOperationsBrokerListenerResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-iotops-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (r IotOperationsBrokerListenerResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +# NOTE: These values should be replaced with 
actual IoT Operations instance and broker names +# that exist in your test environment. You can either: +# 1. Set environment variables: IOT_OPERATIONS_INSTANCE_NAME, IOT_OPERATIONS_BROKER_NAME +# 2. Create the instance and broker resources in this template +# 3. Reference existing resources in your test subscription + +resource "azurerm_iotoperations_broker_listener" "test" { + name = "acctest-bl-%s" + resource_group_name = azurerm_resource_group.test.name + + # TODO: Replace these with actual values or environment variables + instance_name = "test-instance-%d" # or use: os.Getenv("IOT_OPERATIONS_INSTANCE_NAME") + broker_name = "test-broker-%d" # or use: os.Getenv("IOT_OPERATIONS_BROKER_NAME") + + properties { + ports { + port = 1883 + } + } +} +`, r.template(data), data.RandomString, data.RandomInteger, data.RandomInteger) +} + +func (r IotOperationsBrokerListenerResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker_listener" "import" { + name = azurerm_iotoperations_broker_listener.test.name + resource_group_name = azurerm_iotoperations_broker_listener.test.resource_group_name + instance_name = azurerm_iotoperations_broker_listener.test.instance_name + broker_name = azurerm_iotoperations_broker_listener.test.broker_name + + properties { + ports { + port = 1883 + } + } +} +`, r.basic(data)) +} + +func (r IotOperationsBrokerListenerResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +# NOTE: Same as basic template - replace with actual IoT Operations instance and broker names + +resource "azurerm_iotoperations_broker_listener" "test" { + name = "acctest-bl-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = "test-instance-%s" # TODO: Replace with actual value or env var + broker_name = "test-broker-%s" # TODO: Replace with actual value or env var + location = azurerm_resource_group.test.location + + tags = { + ENV = "Test" + } + + 
properties { + service_type = "LoadBalancer" + ports { + port = 8080 + protocol = "WebSockets" + authentication_ref = "example-auth" + } + ports { + port = 8443 + protocol = "WebSockets" + authentication_ref = "example-auth" + tls { + mode = "Automatic" + cert_manager_certificate_spec { + issuer_ref { + group = "example-group" + name = "example-issuer" + kind = "Issuer" + } + } + } + } + ports { + port = 1883 + authentication_ref = "example-auth" + } + ports { + port = 8883 + authentication_ref = "example-auth" + tls { + mode = "Manual" + manual { + secret_ref = "example-secret" + } + } + } + } +} +`, r.template(data), data.RandomString, data.RandomString, data.RandomString) +} + + diff --git a/internal/services/iotoperations/iotoperations_broker_resource.go b/internal/services/iotoperations/iotoperations_broker_resource.go new file mode 100644 index 000000000000..76fe93c6ea87 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_broker_resource.go @@ -0,0 +1,1361 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type BrokerResource struct{} + +var _ sdk.ResourceWithUpdate = BrokerResource{} + +type BrokerModel struct { + Name string `tfschema:"name"` + InstanceName string `tfschema:"instance_name"` + ResourceGroupName string `tfschema:"resource_group_name"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + Properties *BrokerPropertiesModel `tfschema:"properties"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type BrokerPropertiesModel struct { + Advanced 
*AdvancedSettingsModel `tfschema:"advanced"` + Cardinality *CardinalityModel `tfschema:"cardinality"` + Diagnostics *BrokerDiagnosticsModel `tfschema:"diagnostics"` + DiskBackedMessageBuffer *DiskBackedMessageBufferModel `tfschema:"disk_backed_message_buffer"` + GenerateResourceLimits *GenerateResourceLimitsModel `tfschema:"generate_resource_limits"` + MemoryProfile *string `tfschema:"memory_profile"` +} + +type AdvancedSettingsModel struct { + Clients *ClientConfigModel `tfschema:"clients"` + EncryptInternalTraffic *string `tfschema:"encrypt_internal_traffic"` + InternalCerts *CertManagerCertOptionsModel `tfschema:"internal_certs"` +} + +type ClientConfigModel struct { + MaxSessionExpirySeconds *int `tfschema:"max_session_expiry_seconds"` + MaxMessageExpirySeconds *int `tfschema:"max_message_expiry_seconds"` + MaxPacketSizeBytes *int `tfschema:"max_packet_size_bytes"` + SubscriberQueueLimit *SubscriberQueueLimitModel `tfschema:"subscriber_queue_limit"` + MaxReceiveMaximum *int `tfschema:"max_receive_maximum"` + MaxKeepAliveSeconds *int `tfschema:"max_keep_alive_seconds"` +} + +type SubscriberQueueLimitModel struct { + Length *int `tfschema:"length"` + Strategy *string `tfschema:"strategy"` +} + +type CertManagerCertOptionsModel struct { + Duration *string `tfschema:"duration"` + RenewBefore *string `tfschema:"renew_before"` + PrivateKey *CertManagerPrivateKeyModel `tfschema:"private_key"` +} + +type CertManagerPrivateKeyModel struct { + Algorithm *string `tfschema:"algorithm"` + RotationPolicy *string `tfschema:"rotation_policy"` +} + +type CardinalityModel struct { + BackendChain BackendChainModel `tfschema:"backend_chain"` + Frontend FrontendModel `tfschema:"frontend"` +} + +// Note: BackendChain fields are required in SDK, not optional +type BackendChainModel struct { + Partitions int `tfschema:"partitions"` // Required in SDK + RedundancyFactor int `tfschema:"redundancy_factor"` // Required in SDK + Workers *int `tfschema:"workers"` // Optional in SDK +} + 
+type FrontendModel struct { + Replicas int `tfschema:"replicas"` // Required in SDK + Workers *int `tfschema:"workers"` // Optional in SDK +} + +type BrokerDiagnosticsModel struct { + Logs *DiagnosticsLogsModel `tfschema:"logs"` + Metrics *MetricsModel `tfschema:"metrics"` + SelfCheck *SelfCheckModel `tfschema:"self_check"` + Traces *TracesModel `tfschema:"traces"` +} + +type DiagnosticsLogsModel struct { + Level *string `tfschema:"level"` +} + +type MetricsModel struct { + PrometheusPort *int `tfschema:"prometheus_port"` +} + +type SelfCheckModel struct { + Mode *string `tfschema:"mode"` + IntervalSeconds *int `tfschema:"interval_seconds"` + TimeoutSeconds *int `tfschema:"timeout_seconds"` +} + +type TracesModel struct { + Mode *string `tfschema:"mode"` + CacheSizeMegabytes *int `tfschema:"cache_size_megabytes"` + SelfTracing *SelfTracingModel `tfschema:"self_tracing"` + SpanChannelCapacity *int `tfschema:"span_channel_capacity"` +} + +type SelfTracingModel struct { + Mode *string `tfschema:"mode"` + IntervalSeconds *int `tfschema:"interval_seconds"` +} + +type DiskBackedMessageBufferModel struct { + MaxSize *string `tfschema:"max_size"` + EphemeralVolumeClaimSpec *VolumeClaimSpecModel `tfschema:"ephemeral_volume_claim_spec"` + PersistentVolumeClaimSpec *VolumeClaimSpecModel `tfschema:"persistent_volume_claim_spec"` +} + +type GenerateResourceLimitsModel struct { + Cpu *string `tfschema:"cpu"` +} + +type VolumeClaimSpecModel struct { + VolumeName *string `tfschema:"volume_name"` + VolumeMode *string `tfschema:"volume_mode"` + StorageClassName *string `tfschema:"storage_class_name"` + AccessModes []string `tfschema:"access_modes"` + DataSource *DataSourceModel `tfschema:"data_source"` + DataSourceRef *DataSourceRefModel `tfschema:"data_source_ref"` + Resources *ResourceRequirementsModel `tfschema:"resources"` + Selector *LabelSelectorModel `tfschema:"selector"` +} + +type DataSourceModel struct { + ApiGroup *string `tfschema:"api_group"` + Kind *string 
`tfschema:"kind"` + Name *string `tfschema:"name"` +} + +type DataSourceRefModel struct { + ApiGroup *string `tfschema:"api_group"` + Kind *string `tfschema:"kind"` + Name *string `tfschema:"name"` + Namespace *string `tfschema:"namespace"` +} + +type ResourceRequirementsModel struct { + Limits map[string]string `tfschema:"limits"` + Requests map[string]string `tfschema:"requests"` +} + +type LabelSelectorModel struct { + MatchExpressions []MatchExpressionModel `tfschema:"match_expressions"` + MatchLabels map[string]string `tfschema:"match_labels"` +} + +type MatchExpressionModel struct { + Key *string `tfschema:"key"` + Operator *string `tfschema:"operator"` + Values []string `tfschema:"values"` +} + +func (r BrokerResource) ModelObject() interface{} { + return &BrokerModel{} +} + +func (r BrokerResource) ResourceType() string { + return "azurerm_iotoperations_broker" +} + +func (r BrokerResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return broker.ValidateBrokerID +} + +func (r BrokerResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "extended_location": { + Type: pluginsdk.TypeList, + Required: true, // Required since SDK requires it + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "type": { + Type: pluginsdk.TypeString, + Required: 
true, + }, + }, + }, + }, + "properties": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "memory_profile": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Tiny", + "Low", + "Medium", + "High", + }, false), + }, + "advanced": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "clients": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "max_session_expiry_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "max_message_expiry_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "max_packet_size_bytes": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "subscriber_queue_limit": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "length": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "strategy": { + Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + "max_receive_maximum": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "max_keep_alive_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + }, + }, + }, + "encrypt_internal_traffic": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "internal_certs": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "duration": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "renew_before": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "private_key": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "algorithm": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "rotation_policy": { + 
Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "cardinality": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "backend_chain": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "partitions": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 16), + }, + "redundancy_factor": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 5), + }, + "workers": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 16), + }, + }, + }, + }, + "frontend": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "replicas": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 16), + }, + "workers": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 16), + }, + }, + }, + }, + }, + }, + }, + "diagnostics": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "logs": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "level": { + Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + "metrics": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "prometheus_port": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 65535), + }, + }, + }, + }, + "self_check": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, 
+ Optional: true, + }, + "interval_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "timeout_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + }, + }, + }, + "traces": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "cache_size_megabytes": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + "self_tracing": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "interval_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + }, + }, + }, + "span_channel_capacity": { + Type: pluginsdk.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "disk_backed_message_buffer": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "max_size": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "ephemeral_volume_claim_spec": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: volumeClaimSpecSchema(), + }, + "persistent_volume_claim_spec": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: volumeClaimSpecSchema(), + }, + }, + }, + }, + "generate_resource_limits": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "cpu": { + Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r BrokerResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func volumeClaimSpecSchema() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + 
"volume_name": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "volume_mode": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "storage_class_name": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "access_modes": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + "data_source": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "api_group": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "kind": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "name": { + Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + "data_source_ref": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "api_group": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "kind": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "name": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "namespace": { + Type: pluginsdk.TypeString, + Optional: true, + }, + }, + }, + }, + "resources": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "limits": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + "requests": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + }, + }, + }, + "selector": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "match_expressions": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "operator": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "values": { + Type: 
pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + }, + }, + }, + "match_labels": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + }, + }, + }, + }, + } +} + +func (r BrokerResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerClient + + var model BrokerModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := broker.NewBrokerID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.Name) + + // Check if resource already exists + existing, err := client.Get(ctx, id) + if err != nil && !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %s: %+v", id, err) + } + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("A resource with the ID %q already exists - to be managed via Terraform this resource needs to be imported into the State", id.ID()) + } + + // Build FULL payload for Create + payload := broker.BrokerResource{ + Properties: expandBrokerProperties(model.Properties), + ExtendedLocation: broker.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: broker.ExtendedLocationType(*model.ExtendedLocationType), + }, + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r BrokerResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerClient + + id, err := broker.ParseBrokerID(metadata.ResourceData.Id()) + if err != nil { + return 
err + } + + var model BrokerModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + //Check what actually changed using d.HasChange() + hasChanges := false + payload := broker.BrokerResource{} + + // Only include properties if they changed + if metadata.ResourceData.HasChange("properties") { + payload.Properties = expandBrokerProperties(model.Properties) + hasChanges = true + } + + // Only make API call if something actually changed + if !hasChanges { + return nil + } + + // Get existing resource to preserve unchanged fields + existing, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("retrieving existing %s: %+v", *id, err) + } + + if existing.Model != nil { + if payload.ExtendedLocation.Name == "" && existing.Model.ExtendedLocation.Name != "" { + payload.ExtendedLocation.Name = existing.Model.ExtendedLocation.Name + } + if payload.ExtendedLocation.Type == "" && existing.Model.ExtendedLocation.Type != "" { + payload.ExtendedLocation.Type = existing.Model.ExtendedLocation.Type + } + // Preserve unchanged properties + if payload.Properties == nil && existing.Model.Properties != nil { + payload.Properties = existing.Model.Properties + } + } + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r BrokerResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerClient + + id, err := broker.ParseBrokerID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return metadata.MarkAsGone(id) + } + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := BrokerModel{ + Name: id.BrokerName, + InstanceName: id.InstanceName, + 
ResourceGroupName: id.ResourceGroupName, + } + + if resp.Model != nil { + + if resp.Model.ExtendedLocation.Name != "" { + model.ExtendedLocationName = &resp.Model.ExtendedLocation.Name + } + if resp.Model.ExtendedLocation.Type != "" { + extLocType := string(resp.Model.ExtendedLocation.Type) + model.ExtendedLocationType = &extLocType + } + + if resp.Model.Properties != nil { + model.Properties = flattenBrokerProperties(resp.Model.Properties) + + if resp.Model.Properties.ProvisioningState != nil { + provState := string(*resp.Model.Properties.ProvisioningState) + model.ProvisioningState = &provState + } + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r BrokerResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerClient + + id, err := broker.ParseBrokerID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +// Helper functions for expanding and flattening data structures +// Helper to expand SubscriberQueueLimitModel to broker.SubscriberQueueLimit +func expandSubscriberQueueLimit(input *SubscriberQueueLimitModel) *broker.SubscriberQueueLimit { + if input == nil { + return nil + } + result := &broker.SubscriberQueueLimit{} + if input.Length != nil { + val := int64(*input.Length) + result.Length = &val + } + if input.Strategy != nil { + strategy := broker.SubscriberMessageDropStrategy(*input.Strategy) + result.Strategy = &strategy + } + return result +} +func expandBrokerProperties(input *BrokerPropertiesModel) *broker.BrokerProperties { + if input == nil { + return nil + } + + props := &broker.BrokerProperties{} + + if input.MemoryProfile != nil { + memProfile := broker.BrokerMemoryProfile(*input.MemoryProfile) + props.MemoryProfile = &memProfile 
+ } + + if input.Advanced != nil { + props.Advanced = expandAdvancedSettings(input.Advanced) + } + + if input.Cardinality != nil { + props.Cardinality = expandCardinality(input.Cardinality) + } + + if input.Diagnostics != nil { + props.Diagnostics = expandBrokerDiagnostics(input.Diagnostics) + } + + if input.DiskBackedMessageBuffer != nil { + props.DiskBackedMessageBuffer = expandDiskBackedMessageBuffer(input.DiskBackedMessageBuffer) + } + + if input.GenerateResourceLimits != nil { + props.GenerateResourceLimits = expandGenerateResourceLimits(input.GenerateResourceLimits) + } + + return props +} + +// expandExtendedLocation removed; now handled inline with separate fields + +func expandAdvancedSettings(input *AdvancedSettingsModel) *broker.AdvancedSettings { + if input == nil { + return nil + } + + result := &broker.AdvancedSettings{} + + if input.EncryptInternalTraffic != nil { + opMode := broker.OperationalMode(*input.EncryptInternalTraffic) + result.EncryptInternalTraffic = &opMode + } + + if input.Clients != nil { + result.Clients = expandClientConfig(input.Clients) + } + + if input.InternalCerts != nil { + result.InternalCerts = expandCertManagerCertOptions(input.InternalCerts) + } + + return result +} + +func expandCardinality(input *CardinalityModel) *broker.Cardinality { + if input == nil { + return nil + } + + result := &broker.Cardinality{ + BackendChain: expandBackendChain(&input.BackendChain), + Frontend: expandFrontend(&input.Frontend), + } + + return result +} + +func expandBackendChain(input *BackendChainModel) broker.BackendChain { + result := broker.BackendChain{ + Partitions: int64(input.Partitions), + RedundancyFactor: int64(input.RedundancyFactor), + } + + if input.Workers != nil { + workers := int64(*input.Workers) + result.Workers = &workers + } + + return result +} + +func expandFrontend(input *FrontendModel) broker.Frontend { + result := broker.Frontend{ + Replicas: int64(input.Replicas), + } + + if input.Workers != nil { + workers := 
int64(*input.Workers) + result.Workers = &workers + } + + return result +} + +func expandClientConfig(input *ClientConfigModel) *broker.ClientConfig { + if input == nil { + return nil + } + result := &broker.ClientConfig{} + + if input.MaxSessionExpirySeconds != nil { + val := int64(*input.MaxSessionExpirySeconds) + result.MaxSessionExpirySeconds = &val + } + if input.MaxMessageExpirySeconds != nil { + val := int64(*input.MaxMessageExpirySeconds) + result.MaxMessageExpirySeconds = &val + } + if input.MaxPacketSizeBytes != nil { + val := int64(*input.MaxPacketSizeBytes) + result.MaxPacketSizeBytes = &val + } + if input.SubscriberQueueLimit != nil { + result.SubscriberQueueLimit = expandSubscriberQueueLimit(input.SubscriberQueueLimit) + } + if input.MaxReceiveMaximum != nil { + val := int64(*input.MaxReceiveMaximum) + result.MaxReceiveMaximum = &val + } + if input.MaxKeepAliveSeconds != nil { + val := int64(*input.MaxKeepAliveSeconds) + result.MaxKeepAliveSeconds = &val + } + + return result +} + +func expandCertManagerCertOptions(input *CertManagerCertOptionsModel) *broker.CertManagerCertOptions { + if input == nil { + return nil + } + + result := &broker.CertManagerCertOptions{} + + if input.Duration != nil { + result.Duration = *input.Duration + } + if input.RenewBefore != nil { + result.RenewBefore = *input.RenewBefore + } + if input.PrivateKey != nil { + result.PrivateKey = *expandCertManagerPrivateKey(input.PrivateKey) + } + + return result +} + +// Helper to expand CertManagerPrivateKeyModel to broker.CertManagerPrivateKey +func expandCertManagerPrivateKey(input *CertManagerPrivateKeyModel) *broker.CertManagerPrivateKey { + if input == nil { + return nil + } + result := &broker.CertManagerPrivateKey{} + if input.Algorithm != nil { + result.Algorithm = broker.PrivateKeyAlgorithm(*input.Algorithm) + } + if input.RotationPolicy != nil { + result.RotationPolicy = broker.PrivateKeyRotationPolicy(*input.RotationPolicy) + } + return result +} +func 
expandBrokerDiagnostics(input *BrokerDiagnosticsModel) *broker.BrokerDiagnostics { + if input == nil { + return nil + } + + result := &broker.BrokerDiagnostics{} + + if input.Logs != nil { + result.Logs = expandDiagnosticsLogs(input.Logs) + } + if input.Metrics != nil { + result.Metrics = expandMetrics(input.Metrics) + } + if input.SelfCheck != nil { + result.SelfCheck = expandSelfCheck(input.SelfCheck) + } + if input.Traces != nil { + result.Traces = expandTraces(input.Traces) + } + + return result +} + +// Helper to expand DiagnosticsLogsModel to broker.DiagnosticsLogs +func expandDiagnosticsLogs(input *DiagnosticsLogsModel) *broker.DiagnosticsLogs { + if input == nil { + return nil + } + result := &broker.DiagnosticsLogs{} + if input.Level != nil { + result.Level = input.Level + } + return result +} + +// Helper to expand MetricsModel to broker.Metrics +func expandMetrics(input *MetricsModel) *broker.Metrics { + if input == nil { + return nil + } + result := &broker.Metrics{} + if input.PrometheusPort != nil { + val := int64(*input.PrometheusPort) + result.PrometheusPort = &val + } + return result +} + +// Helper to expand SelfCheckModel to broker.SelfCheck +func expandSelfCheck(input *SelfCheckModel) *broker.SelfCheck { + if input == nil { + return nil + } + result := &broker.SelfCheck{} + if input.Mode != nil { + mode := broker.OperationalMode(*input.Mode) + result.Mode = &mode + } + if input.IntervalSeconds != nil { + val := int64(*input.IntervalSeconds) + result.IntervalSeconds = &val + } + if input.TimeoutSeconds != nil { + val := int64(*input.TimeoutSeconds) + result.TimeoutSeconds = &val + } + return result +} + +// Helper to expand TracesModel to broker.Traces +func expandTraces(input *TracesModel) *broker.Traces { + if input == nil { + return nil + } + result := &broker.Traces{} + if input.Mode != nil { + mode := broker.OperationalMode(*input.Mode) + result.Mode = &mode + } + if input.CacheSizeMegabytes != nil { + val := int64(*input.CacheSizeMegabytes) 
+ result.CacheSizeMegabytes = &val + } + if input.SelfTracing != nil { + result.SelfTracing = expandSelfTracing(input.SelfTracing) + } + if input.SpanChannelCapacity != nil { + val := int64(*input.SpanChannelCapacity) + result.SpanChannelCapacity = &val + } + return result +} + +// Helper to expand SelfTracingModel to broker.SelfTracing +func expandSelfTracing(input *SelfTracingModel) *broker.SelfTracing { + if input == nil { + return nil + } + result := &broker.SelfTracing{} + if input.Mode != nil { + mode := broker.OperationalMode(*input.Mode) + result.Mode = &mode + } + if input.IntervalSeconds != nil { + val := int64(*input.IntervalSeconds) + result.IntervalSeconds = &val + } + return result +} + +func expandDiskBackedMessageBuffer(input *DiskBackedMessageBufferModel) *broker.DiskBackedMessageBuffer { + if input == nil { + return nil + } + + result := &broker.DiskBackedMessageBuffer{} + + if input.MaxSize != nil { + result.MaxSize = *input.MaxSize + } + if input.EphemeralVolumeClaimSpec != nil { + result.EphemeralVolumeClaimSpec = expandVolumeClaimSpec(input.EphemeralVolumeClaimSpec) + } + if input.PersistentVolumeClaimSpec != nil { + result.PersistentVolumeClaimSpec = expandVolumeClaimSpec(input.PersistentVolumeClaimSpec) + } + + return result +} + +// Helper to expand VolumeClaimSpecModel to broker.VolumeClaimSpec +func expandVolumeClaimSpec(input *VolumeClaimSpecModel) *broker.VolumeClaimSpec { + if input == nil { + return nil + } + result := &broker.VolumeClaimSpec{} + if input.VolumeName != nil { + result.VolumeName = input.VolumeName + } + if input.VolumeMode != nil { + result.VolumeMode = input.VolumeMode + } + if input.StorageClassName != nil { + result.StorageClassName = input.StorageClassName + } + if len(input.AccessModes) > 0 { + accessModes := make([]string, len(input.AccessModes)) + copy(accessModes, input.AccessModes) + result.AccessModes = &accessModes + } + if input.DataSource != nil { + result.DataSource = expandDataSource(input.DataSource) + 
} + if input.DataSourceRef != nil { + result.DataSourceRef = expandDataSourceRef(input.DataSourceRef) + } + if input.Resources != nil { + result.Resources = expandResourceRequirements(input.Resources) + } + if input.Selector != nil { + result.Selector = expandLabelSelector(input.Selector) + } + return result +} + +// Helper to expand DataSourceModel to broker.LocalKubernetesReference +func expandDataSource(input *DataSourceModel) *broker.LocalKubernetesReference { + if input == nil { + return nil + } + result := &broker.LocalKubernetesReference{} + if input.ApiGroup != nil { + result.ApiGroup = input.ApiGroup + } + if input.Kind != nil { + result.Kind = *input.Kind + } + if input.Name != nil { + result.Name = *input.Name + } + return result +} + +// Helper to expand DataSourceRefModel to broker.KubernetesReference +func expandDataSourceRef(input *DataSourceRefModel) *broker.KubernetesReference { + if input == nil { + return nil + } + result := &broker.KubernetesReference{} + if input.ApiGroup != nil { + result.ApiGroup = input.ApiGroup + } + if input.Kind != nil { + result.Kind = *input.Kind + } + if input.Name != nil { + result.Name = *input.Name + } + if input.Namespace != nil { + result.Namespace = input.Namespace + } + return result +} + +// Helper to expand ResourceRequirementsModel to broker.VolumeClaimResourceRequirements +func expandResourceRequirements(input *ResourceRequirementsModel) *broker.VolumeClaimResourceRequirements { + if input == nil { + return nil + } + result := &broker.VolumeClaimResourceRequirements{} + if input.Limits != nil { + result.Limits = &map[string]string{} + for k, v := range input.Limits { + (*result.Limits)[k] = v + } + } + if input.Requests != nil { + result.Requests = &map[string]string{} + for k, v := range input.Requests { + (*result.Requests)[k] = v + } + } + return result +} + +// Helper to expand LabelSelectorModel to broker.VolumeClaimSpecSelector +func expandLabelSelector(input *LabelSelectorModel) 
*broker.VolumeClaimSpecSelector { + if input == nil { + return nil + } + result := &broker.VolumeClaimSpecSelector{} + if len(input.MatchExpressions) > 0 { + matchExprs := make([]broker.VolumeClaimSpecSelectorMatchExpressions, len(input.MatchExpressions)) + for i, me := range input.MatchExpressions { + matchExprs[i] = expandMatchExpression(me) + } + result.MatchExpressions = &matchExprs + } + if input.MatchLabels != nil { + result.MatchLabels = &input.MatchLabels + } + return result +} + +// Helper to expand MatchExpressionModel to broker.VolumeClaimSpecSelectorMatchExpressions +func expandMatchExpression(input MatchExpressionModel) broker.VolumeClaimSpecSelectorMatchExpressions { + result := broker.VolumeClaimSpecSelectorMatchExpressions{} + if input.Key != nil { + result.Key = *input.Key + } + if input.Operator != nil { + result.Operator = broker.OperatorValues(*input.Operator) + } + if len(input.Values) > 0 { + result.Values = &input.Values + } + return result +} +func expandGenerateResourceLimits(input *GenerateResourceLimitsModel) *broker.GenerateResourceLimits { + if input == nil { + return nil + } + + result := &broker.GenerateResourceLimits{} + if input.Cpu != nil { + opMode := broker.OperationalMode(*input.Cpu) + result.Cpu = &opMode + } + return result +} + +// Flatten functions for Read operations +// flattenExtendedLocation removed; now handled inline with separate fields + +func flattenBrokerProperties(input *broker.BrokerProperties) *BrokerPropertiesModel { + if input == nil { + return nil + } + + result := &BrokerPropertiesModel{} + + if input.MemoryProfile != nil { + memProfile := string(*input.MemoryProfile) + result.MemoryProfile = &memProfile + } + + if input.Advanced != nil { + result.Advanced = flattenAdvancedSettings(input.Advanced) + } + + if input.Cardinality != nil { + result.Cardinality = flattenCardinality(input.Cardinality) + } + + return result +} + +func flattenAdvancedSettings(input *broker.AdvancedSettings) *AdvancedSettingsModel { + 
if input == nil { + return nil + } + + result := &AdvancedSettingsModel{} + + if input.EncryptInternalTraffic != nil { + opMode := string(*input.EncryptInternalTraffic) + result.EncryptInternalTraffic = &opMode + } + + return result +} + +func flattenCardinality(input *broker.Cardinality) *CardinalityModel { + if input == nil { + return nil + } + + result := &CardinalityModel{ + BackendChain: flattenBackendChain(&input.BackendChain), + Frontend: flattenFrontend(&input.Frontend), + } + + return result +} + +func flattenBackendChain(input *broker.BackendChain) BackendChainModel { + result := BackendChainModel{ + Partitions: int(input.Partitions), + RedundancyFactor: int(input.RedundancyFactor), + } + + if input.Workers != nil { + workers := int(*input.Workers) + result.Workers = &workers + } + + return result +} + +func flattenFrontend(input *broker.Frontend) FrontendModel { + result := FrontendModel{ + Replicas: int(input.Replicas), + } + + if input.Workers != nil { + workers := int(*input.Workers) + result.Workers = &workers + } + + return result +} diff --git a/internal/services/iotoperations/iotoperations_broker_resource_test.go b/internal/services/iotoperations/iotoperations_broker_resource_test.go new file mode 100644 index 000000000000..4ebb74ce20f3 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_broker_resource_test.go @@ -0,0 +1,204 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +func TestAccIotOperationsBroker_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker", "test") + r := BrokerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBroker_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker", "test") + r := BrokerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_iotoperations_broker"), + }, + }) +} + +func TestAccIotOperationsBroker_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker", "test") + r := BrokerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBroker_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker", "test") + r := BrokerResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + 
Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +type BrokerResource struct{} + +func (BrokerResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := broker.ParseBrokerID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.IoTOperations.BrokerClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", *id, err) + } + + return utils.Bool(resp.Model != nil), nil +} + +// Template function to create common resources +func (BrokerResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-iot-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (r BrokerResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker" "test" { + name = "acctest-br-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = "test-instance-%d" + + properties { + memory_profile = "Tiny" + } + + extended_location { + name = "/subscriptions/%s/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.ExtendedLocation/customLocations/testlocation-%s" + type = "CustomLocation" + } +} +`, r.template(data), data.RandomString, data.RandomInteger, data.Client().SubscriptionID, data.RandomString) +} + +func (r BrokerResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker" 
"import" { + name = azurerm_iotoperations_broker.test.name + resource_group_name = azurerm_iotoperations_broker.test.resource_group_name + instance_name = azurerm_iotoperations_broker.test.instance_name + + properties { + memory_profile = "Tiny" + } + + extended_location { + name = azurerm_iotoperations_broker.test.extended_location[0].name + type = azurerm_iotoperations_broker.test.extended_location[0].type + } +} +`, r.basic(data)) +} + +func (r BrokerResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker" "test" { + name = "acctest-br-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = "test-instance-%d" + location = azurerm_resource_group.test.location + + tags = { + ENV = "Test" + } + + properties { + memory_profile = "Large" + + cardinality { + backend_chain { + partitions = 2 + redundancy_factor = 2 + workers = 2 + } + frontend { + replicas = 2 + workers = 2 + } + } + } + + extended_location { + name = "/subscriptions/%s/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.ExtendedLocation/customLocations/testlocation-%s" + type = "CustomLocation" + } +} +`, r.template(data), data.RandomString, data.RandomInteger, data.Client().SubscriptionID, data.RandomString) +} diff --git a/internal/services/iotoperations/iotoperations_brokerauthentication_resource.go b/internal/services/iotoperations/iotoperations_brokerauthentication_resource.go new file mode 100644 index 000000000000..be30a875a8ed --- /dev/null +++ b/internal/services/iotoperations/iotoperations_brokerauthentication_resource.go @@ -0,0 +1,574 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + 
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type BrokerAuthenticationResource struct{} + +var _ sdk.ResourceWithUpdate = BrokerAuthenticationResource{} + +type BrokerAuthenticationModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + InstanceName string `tfschema:"instance_name"` + BrokerName string `tfschema:"broker_name"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + AuthenticationMethods []BrokerAuthenticationMethodModel `tfschema:"authentication_methods"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type BrokerAuthenticationMethodModel struct { + Method string `tfschema:"method"` + CustomSettings *BrokerAuthenticationCustomSettingsModel `tfschema:"custom_settings"` + ServiceAccountTokenSettings *BrokerAuthenticationServiceAccountTokenModel `tfschema:"service_account_token_settings"` + X509Settings *BrokerAuthenticationX509SettingsModel `tfschema:"x509_settings"` +} + +type BrokerAuthenticationCustomSettingsModel struct { + Auth *BrokerAuthenticationCustomAuthModel `tfschema:"auth"` + CaCertConfigMap *string `tfschema:"ca_cert_config_map"` + Endpoint string `tfschema:"endpoint"` + Headers map[string]string `tfschema:"headers"` +} + +type BrokerAuthenticationCustomAuthModel struct { + X509 BrokerAuthenticationX509ManualModel `tfschema:"x509"` +} + +type BrokerAuthenticationX509ManualModel struct { + SecretRef string `tfschema:"secret_ref"` +} + +type BrokerAuthenticationServiceAccountTokenModel struct { + Audiences []string `tfschema:"audiences"` +} + +type BrokerAuthenticationX509SettingsModel struct { + AuthorizationAttributes []BrokerAuthenticationX509AttributesModel `tfschema:"authorization_attributes"` + TrustedClientCaCert *string `tfschema:"trusted_client_ca_cert"` +} + +type BrokerAuthenticationX509AttributesModel struct { + Name string `tfschema:"name"` + 
Attributes map[string]string `tfschema:"attributes"` + Subject string `tfschema:"subject"` +} + +func (r BrokerAuthenticationResource) ModelObject() interface{} { + return &BrokerAuthenticationModel{} +} + +func (r BrokerAuthenticationResource) ResourceType() string { + return "azurerm_iotoperations_broker_authentication" +} + +func (r BrokerAuthenticationResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return brokerauthentication.ValidateAuthenticationID +} + +func (r BrokerAuthenticationResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "broker_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "extended_location": { + Type: pluginsdk.TypeList, + Required: true, // Changed from optional since SDK requires it + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "type": { + Type: pluginsdk.TypeString, + Optional: true, + 
Default: "CustomLocation", + ValidateFunc: validation.StringInSlice([]string{ + "CustomLocation", + }, false), + }, + }, + }, + }, + "authentication_methods": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "method": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Custom", + "ServiceAccountToken", + "X509", + }, false), + }, + "custom_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "endpoint": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "auth": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "x509": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "secret_ref": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + }, + }, + }, + "ca_cert_config_map": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "headers": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + "service_account_token_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "audiences": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + }, + "x509_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "trusted_client_ca_cert": { + Type: 
pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "authorization_attributes": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "subject": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "attributes": { + Type: pluginsdk.TypeMap, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r BrokerAuthenticationResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func (r BrokerAuthenticationResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthenticationClient + + var model BrokerAuthenticationModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := brokerauthentication.NewAuthenticationID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.BrokerName, model.Name) + + // Check if resource already exists + existing, err := client.Get(ctx, id) + if err == nil && existing.Model != nil { + return fmt.Errorf("IoT Operations Broker Authentication %q already exists", id.AuthenticationName) + } + + // Build payload with proper ExtendedLocation struct + payload := brokerauthentication.BrokerAuthenticationResource{ + ExtendedLocation: brokerauthentication.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: brokerauthentication.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: 
expandBrokerAuthenticationProperties(model.AuthenticationMethods), + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r BrokerAuthenticationResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthenticationClient + + id, err := brokerauthentication.ParseAuthenticationID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := BrokerAuthenticationModel{ + Name: id.AuthenticationName, + ResourceGroupName: id.ResourceGroupName, + InstanceName: id.InstanceName, + BrokerName: id.BrokerName, + } + + if respModel := resp.Model; respModel != nil { + // Properly map ExtendedLocation struct + model.ExtendedLocationName = &respModel.ExtendedLocation.Name + extendedLocationType := string(respModel.ExtendedLocation.Type) + model.ExtendedLocationType = &extendedLocationType + + if respModel.Properties != nil { + model.AuthenticationMethods = flattenBrokerAuthenticationProperties(respModel.Properties) + + if respModel.Properties.ProvisioningState != nil { + provisioningState := string(*respModel.Properties.ProvisioningState) + model.ProvisioningState = &provisioningState + } + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r BrokerAuthenticationResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthenticationClient + + id, err := brokerauthentication.ParseAuthenticationID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var model BrokerAuthenticationModel + if err := 
metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // Since there's no separate Update method, use CreateOrUpdate + payload := brokerauthentication.BrokerAuthenticationResource{ + ExtendedLocation: brokerauthentication.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: brokerauthentication.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandBrokerAuthenticationProperties(model.AuthenticationMethods), + } + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r BrokerAuthenticationResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthenticationClient + + id, err := brokerauthentication.ParseAuthenticationID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +// Helper functions for expand/flatten operations +func expandBrokerAuthenticationProperties(methods []BrokerAuthenticationMethodModel) *brokerauthentication.BrokerAuthenticationProperties { + return &brokerauthentication.BrokerAuthenticationProperties{ + AuthenticationMethods: expandBrokerAuthenticationMethods(methods), + } +} + +func expandBrokerAuthenticationMethods(methods []BrokerAuthenticationMethodModel) []brokerauthentication.BrokerAuthenticatorMethods { + result := make([]brokerauthentication.BrokerAuthenticatorMethods, 0, len(methods)) + + for _, method := range methods { + authMethod := brokerauthentication.BrokerAuthenticatorMethods{ + Method: brokerauthentication.BrokerAuthenticationMethod(method.Method), + } + + if method.CustomSettings != nil { + authMethod.CustomSettings = 
expandBrokerAuthenticationCustomSettings(*method.CustomSettings) + } + + if method.ServiceAccountTokenSettings != nil { + authMethod.ServiceAccountTokenSettings = expandBrokerAuthenticationServiceAccountToken(*method.ServiceAccountTokenSettings) + } + + if method.X509Settings != nil { + authMethod.X509Settings = expandBrokerAuthenticationX509Settings(*method.X509Settings) + } + + result = append(result, authMethod) + } + + return result +} + +func expandBrokerAuthenticationCustomSettings(settings BrokerAuthenticationCustomSettingsModel) *brokerauthentication.BrokerAuthenticatorMethodCustom { + result := &brokerauthentication.BrokerAuthenticatorMethodCustom{ + Endpoint: settings.Endpoint, + } + + if settings.Auth != nil { + result.Auth = &brokerauthentication.BrokerAuthenticatorCustomAuth{ + X509: brokerauthentication.X509ManualCertificate{ + SecretRef: settings.Auth.X509.SecretRef, + }, + } + } + + if settings.CaCertConfigMap != nil { + result.CaCertConfigMap = settings.CaCertConfigMap + } + + if len(settings.Headers) > 0 { + result.Headers = &settings.Headers + } + + return result +} + +func expandBrokerAuthenticationServiceAccountToken(settings BrokerAuthenticationServiceAccountTokenModel) *brokerauthentication.BrokerAuthenticatorMethodSat { + return &brokerauthentication.BrokerAuthenticatorMethodSat{ + Audiences: settings.Audiences, + } +} + +func expandBrokerAuthenticationX509Settings(settings BrokerAuthenticationX509SettingsModel) *brokerauthentication.BrokerAuthenticatorMethodX509 { + result := &brokerauthentication.BrokerAuthenticatorMethodX509{} + + if settings.TrustedClientCaCert != nil { + result.TrustedClientCaCert = settings.TrustedClientCaCert + } + + if len(settings.AuthorizationAttributes) > 0 { + authzAttrs := make(map[string]brokerauthentication.BrokerAuthenticatorMethodX509Attributes) + for _, attr := range settings.AuthorizationAttributes { + authzAttrs[attr.Name] = brokerauthentication.BrokerAuthenticatorMethodX509Attributes{ + Subject: 
attr.Subject, + Attributes: attr.Attributes, + } + } + result.AuthorizationAttributes = &authzAttrs + } + + return result +} + +func flattenBrokerAuthenticationProperties(props *brokerauthentication.BrokerAuthenticationProperties) []BrokerAuthenticationMethodModel { + if props == nil { + return []BrokerAuthenticationMethodModel{} + } + + result := make([]BrokerAuthenticationMethodModel, 0, len(props.AuthenticationMethods)) + + for _, method := range props.AuthenticationMethods { + authMethod := BrokerAuthenticationMethodModel{ + Method: string(method.Method), + } + + if method.CustomSettings != nil { + authMethod.CustomSettings = flattenBrokerAuthenticationCustomSettings(method.CustomSettings) + } + + if method.ServiceAccountTokenSettings != nil { + authMethod.ServiceAccountTokenSettings = flattenBrokerAuthenticationServiceAccountToken(method.ServiceAccountTokenSettings) + } + + if method.X509Settings != nil { + authMethod.X509Settings = flattenBrokerAuthenticationX509Settings(method.X509Settings) + } + + result = append(result, authMethod) + } + + return result +} + +func flattenBrokerAuthenticationCustomSettings(settings *brokerauthentication.BrokerAuthenticatorMethodCustom) *BrokerAuthenticationCustomSettingsModel { + result := &BrokerAuthenticationCustomSettingsModel{ + Endpoint: settings.Endpoint, + } + + if settings.Auth != nil { + result.Auth = &BrokerAuthenticationCustomAuthModel{ + X509: BrokerAuthenticationX509ManualModel{ + SecretRef: settings.Auth.X509.SecretRef, + }, + } + } + + if settings.CaCertConfigMap != nil { + result.CaCertConfigMap = settings.CaCertConfigMap + } + + if settings.Headers != nil { + result.Headers = *settings.Headers + } + + return result +} + +func flattenBrokerAuthenticationServiceAccountToken(settings *brokerauthentication.BrokerAuthenticatorMethodSat) *BrokerAuthenticationServiceAccountTokenModel { + return &BrokerAuthenticationServiceAccountTokenModel{ + Audiences: settings.Audiences, + } +} + +func 
flattenBrokerAuthenticationX509Settings(settings *brokerauthentication.BrokerAuthenticatorMethodX509) *BrokerAuthenticationX509SettingsModel { + result := &BrokerAuthenticationX509SettingsModel{ + AuthorizationAttributes: []BrokerAuthenticationX509AttributesModel{}, + } + + if settings.TrustedClientCaCert != nil { + result.TrustedClientCaCert = settings.TrustedClientCaCert + } + + if settings.AuthorizationAttributes != nil { + for key, attr := range *settings.AuthorizationAttributes { + result.AuthorizationAttributes = append(result.AuthorizationAttributes, BrokerAuthenticationX509AttributesModel{ + Name: key, + Subject: attr.Subject, + Attributes: attr.Attributes, + }) + } + } + + return result +} diff --git a/internal/services/iotoperations/iotoperations_brokerauthentication_resource_test.go b/internal/services/iotoperations/iotoperations_brokerauthentication_resource_test.go new file mode 100644 index 000000000000..5036a19dff9d --- /dev/null +++ b/internal/services/iotoperations/iotoperations_brokerauthentication_resource_test.go @@ -0,0 +1,222 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type BrokerAuthenticationResource struct{} + +func TestAccIotOperationsBrokerAuthentication_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authentication", "test") + r := BrokerAuthenticationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBrokerAuthentication_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authentication", "test") + r := BrokerAuthenticationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_iotoperations_broker_authentication"), + }, + }) +} + +func TestAccIotOperationsBrokerAuthentication_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authentication", "test") + r := BrokerAuthenticationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + 
check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBrokerAuthentication_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authentication", "test") + r := BrokerAuthenticationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r BrokerAuthenticationResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := brokerauthentication.ParseAuthenticationID(state.ID) + if err != nil { + return nil, err + } + + client := clients.IoTOperations.BrokerAuthenticationClient + resp, err := client.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving IoT Operations Broker Authentication %q: %+v", id.AuthenticationName, err) + } + + return utils.Bool(resp.Model != nil), nil +} + +// Template function to create common resources +func (BrokerAuthenticationResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-iot-%d" + location = "%s" +} +`, data.RandomInteger, data.Locations.Primary) +} + +func (r BrokerAuthenticationResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker_authentication" "test" { + name = "acctest-ba-%s" + resource_group_name = 
azurerm_resource_group.test.name + instance_name = "test-instance-%d" + broker_name = "test-broker-%s" + location = azurerm_resource_group.test.location + + authentication_methods { + method = "ServiceAccountToken" + service_account_token_settings { + audiences = ["test-audience-%s"] + } + } + + extended_location { + name = "/subscriptions/%s/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.ExtendedLocation/customLocations/testlocation-%s" + type = "CustomLocation" + } +} +`, r.template(data), data.RandomString, data.RandomInteger, data.RandomString, data.RandomString, data.Client().SubscriptionID, data.RandomString) +} + +func (r BrokerAuthenticationResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker_authentication" "import" { + name = azurerm_iotoperations_broker_authentication.test.name + resource_group_name = azurerm_iotoperations_broker_authentication.test.resource_group_name + instance_name = azurerm_iotoperations_broker_authentication.test.instance_name + broker_name = azurerm_iotoperations_broker_authentication.test.broker_name + location = azurerm_iotoperations_broker_authentication.test.location + + authentication_methods { + method = "ServiceAccountToken" + service_account_token_settings { + audiences = ["test-audience-%s"] + } + } + + extended_location { + name = azurerm_iotoperations_broker_authentication.test.extended_location[0].name + type = azurerm_iotoperations_broker_authentication.test.extended_location[0].type + } +} +`, r.basic(data), data.RandomString) +} + +func (r BrokerAuthenticationResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker_authentication" "test" { + name = "acctest-ba-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = "test-instance-%d" + broker_name = "test-broker-%s" + location = azurerm_resource_group.test.location + + tags = { + 
ENV = "Test" + } + + authentication_methods { + method = "ServiceAccountToken" + service_account_token_settings { + audiences = ["test-audience-%s", "additional-audience-%s"] + } + } + + authentication_methods { + method = "X509Certificate" + x509_settings { + trusted_client_ca_cert = "example-cert-%s" + authorization_attributes = { + "subject" = "CN=example" + "issuer" = "CN=ca" + } + } + } + + extended_location { + name = "/subscriptions/%s/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.ExtendedLocation/customLocations/testlocation-%s" + type = "CustomLocation" + } +} +`, r.template(data), data.RandomString, data.RandomInteger, data.RandomString, data.RandomString, data.RandomString, data.RandomString, data.Client().SubscriptionID, data.RandomString) +} diff --git a/internal/services/iotoperations/iotoperations_brokerauthorization_resource.go b/internal/services/iotoperations/iotoperations_brokerauthorization_resource.go new file mode 100644 index 000000000000..9df6d22591f6 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_brokerauthorization_resource.go @@ -0,0 +1,591 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type BrokerAuthorizationResource struct{} + +var _ sdk.ResourceWithUpdate = BrokerAuthorizationResource{} + +type BrokerAuthorizationModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + InstanceName string `tfschema:"instance_name"` + BrokerName string `tfschema:"broker_name"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + 
AuthorizationPolicies BrokerAuthorizationConfigModel `tfschema:"authorization_policies"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type BrokerAuthorizationConfigModel struct { + Cache *string `tfschema:"cache"` + Rules []BrokerAuthorizationRuleModel `tfschema:"rules"` +} + +type BrokerAuthorizationRuleModel struct { + BrokerResources []BrokerAuthorizationBrokerResourceModel `tfschema:"broker_resources"` + Principals BrokerAuthorizationPrincipalModel `tfschema:"principals"` + StateStoreResources []BrokerAuthorizationStateStoreResourceModel `tfschema:"state_store_resources"` +} + +type BrokerAuthorizationBrokerResourceModel struct { + Method string `tfschema:"method"` + Clients []string `tfschema:"clients"` + Topics []string `tfschema:"topics"` +} + +type BrokerAuthorizationPrincipalModel struct { + Attributes []map[string]string `tfschema:"attributes"` + Clients []string `tfschema:"clients"` + Usernames []string `tfschema:"usernames"` +} + +type BrokerAuthorizationStateStoreResourceModel struct { + KeyType string `tfschema:"key_type"` + Keys []string `tfschema:"keys"` + Method string `tfschema:"method"` +} + +func (r BrokerAuthorizationResource) ModelObject() interface{} { + return &BrokerAuthorizationModel{} +} + +func (r BrokerAuthorizationResource) ResourceType() string { + return "azurerm_iotoperations_broker_authorization" +} + +func (r BrokerAuthorizationResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return brokerauthorization.ValidateAuthorizationID +} + +func (r BrokerAuthorizationResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + 
ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "broker_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "extended_location": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "type": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "CustomLocation", + ValidateFunc: validation.StringInSlice([]string{ + "CustomLocation", + }, false), + }, + }, + }, + }, + "authorization_policies": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "cache": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "Enabled", + ValidateFunc: validation.StringInSlice([]string{ + "Enabled", + "Disabled", + }, false), + }, + "rules": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "broker_resources": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "method": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Connect", + "Publish", + "Subscribe", + }, false), + }, + "clients": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: 
&pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + "topics": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + }, + }, + }, + "principals": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "clients": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + "usernames": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + "attributes": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeMap, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + }, + }, + }, + }, + "state_store_resources": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key_type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Binary", + "Pattern", + "String", + }, false), + }, + "keys": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringIsNotEmpty, + }, + }, + "method": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Read", + "ReadWrite", + "Write", + }, false), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r BrokerAuthorizationResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func (r BrokerAuthorizationResource) Create() sdk.ResourceFunc { + return 
sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthorizationClient + + var model BrokerAuthorizationModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := brokerauthorization.NewAuthorizationID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.BrokerName, model.Name) + + // Check if resource already exists + existing, err := client.Get(ctx, id) + if err == nil && existing.Model != nil { + return fmt.Errorf("IoT Operations Broker Authorization %q already exists", id.AuthorizationName) + } + + // Build payload + payload := brokerauthorization.BrokerAuthorizationResource{ + ExtendedLocation: brokerauthorization.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: brokerauthorization.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandBrokerAuthorizationProperties(model.AuthorizationPolicies), + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r BrokerAuthorizationResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthorizationClient + + id, err := brokerauthorization.ParseAuthorizationID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := BrokerAuthorizationModel{ + Name: id.AuthorizationName, + ResourceGroupName: id.ResourceGroupName, + InstanceName: id.InstanceName, + BrokerName: id.BrokerName, + } + + if respModel := resp.Model; respModel != nil { + 
model.ExtendedLocationName = &respModel.ExtendedLocation.Name + extendedLocationType := string(respModel.ExtendedLocation.Type) + model.ExtendedLocationType = &extendedLocationType + + if respModel.Properties != nil { + model.AuthorizationPolicies = flattenBrokerAuthorizationProperties(respModel.Properties) + + if respModel.Properties.ProvisioningState != nil { + provisioningState := string(*respModel.Properties.ProvisioningState) + model.ProvisioningState = &provisioningState + } + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r BrokerAuthorizationResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthorizationClient + + id, err := brokerauthorization.ParseAuthorizationID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var model BrokerAuthorizationModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // Since there's no separate Update method, use CreateOrUpdate + payload := brokerauthorization.BrokerAuthorizationResource{ + ExtendedLocation: brokerauthorization.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: brokerauthorization.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandBrokerAuthorizationProperties(model.AuthorizationPolicies), + } + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r BrokerAuthorizationResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.BrokerAuthorizationClient + + id, err := brokerauthorization.ParseAuthorizationID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + 
if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +// Helper functions for expand/flatten operations +func expandBrokerAuthorizationProperties(config BrokerAuthorizationConfigModel) *brokerauthorization.BrokerAuthorizationProperties { + authConfig := brokerauthorization.AuthorizationConfig{ + Rules: expandBrokerAuthorizationRules(config.Rules), + } + + if config.Cache != nil { + cache := brokerauthorization.OperationalMode(*config.Cache) + authConfig.Cache = &cache + } + + return &brokerauthorization.BrokerAuthorizationProperties{ + AuthorizationPolicies: authConfig, + } +} + +func expandBrokerAuthorizationRules(rules []BrokerAuthorizationRuleModel) *[]brokerauthorization.AuthorizationRule { + if len(rules) == 0 { + return nil + } + + result := make([]brokerauthorization.AuthorizationRule, 0, len(rules)) + + for _, rule := range rules { + authRule := brokerauthorization.AuthorizationRule{ + BrokerResources: expandBrokerAuthorizationBrokerResources(rule.BrokerResources), + Principals: expandBrokerAuthorizationPrincipals(rule.Principals), + } + + if len(rule.StateStoreResources) > 0 { + authRule.StateStoreResources = expandBrokerAuthorizationStateStoreResources(rule.StateStoreResources) + } + + result = append(result, authRule) + } + + return &result +} + +func expandBrokerAuthorizationBrokerResources(resources []BrokerAuthorizationBrokerResourceModel) []brokerauthorization.BrokerResourceRule { + result := make([]brokerauthorization.BrokerResourceRule, 0, len(resources)) + + for _, resource := range resources { + brokerResource := brokerauthorization.BrokerResourceRule{ + Method: brokerauthorization.BrokerResourceDefinitionMethods(resource.Method), + } + + if len(resource.Clients) > 0 { + brokerResource.ClientIds = &resource.Clients + } + + if len(resource.Topics) > 0 { + brokerResource.Topics = &resource.Topics + } + + result = append(result, brokerResource) + } + + return 
result +} + +func expandBrokerAuthorizationPrincipals(principal BrokerAuthorizationPrincipalModel) brokerauthorization.PrincipalDefinition { + result := brokerauthorization.PrincipalDefinition{} + + if len(principal.Clients) > 0 { + result.ClientIds = &principal.Clients + } + + if len(principal.Usernames) > 0 { + result.Usernames = &principal.Usernames + } + + if len(principal.Attributes) > 0 { + result.Attributes = &principal.Attributes + } + + return result +} + +func expandBrokerAuthorizationStateStoreResources(resources []BrokerAuthorizationStateStoreResourceModel) *[]brokerauthorization.StateStoreResourceRule { + result := make([]brokerauthorization.StateStoreResourceRule, 0, len(resources)) + + for _, resource := range resources { + stateStoreResource := brokerauthorization.StateStoreResourceRule{ + KeyType: brokerauthorization.StateStoreResourceKeyTypes(resource.KeyType), + Keys: resource.Keys, + Method: brokerauthorization.StateStoreResourceDefinitionMethods(resource.Method), + } + + result = append(result, stateStoreResource) + } + + return &result +} + +func flattenBrokerAuthorizationProperties(props *brokerauthorization.BrokerAuthorizationProperties) BrokerAuthorizationConfigModel { + result := BrokerAuthorizationConfigModel{} + + if props.AuthorizationPolicies.Cache != nil { + cache := string(*props.AuthorizationPolicies.Cache) + result.Cache = &cache + } + + if props.AuthorizationPolicies.Rules != nil { + result.Rules = flattenBrokerAuthorizationRules(*props.AuthorizationPolicies.Rules) + } + + return result +} + +func flattenBrokerAuthorizationRules(rules []brokerauthorization.AuthorizationRule) []BrokerAuthorizationRuleModel { + result := make([]BrokerAuthorizationRuleModel, 0, len(rules)) + + for _, rule := range rules { + ruleModel := BrokerAuthorizationRuleModel{ + BrokerResources: flattenBrokerAuthorizationBrokerResources(rule.BrokerResources), + Principals: flattenBrokerAuthorizationPrincipals(rule.Principals), + } + + if 
rule.StateStoreResources != nil { + ruleModel.StateStoreResources = flattenBrokerAuthorizationStateStoreResources(*rule.StateStoreResources) + } + + result = append(result, ruleModel) + } + + return result +} + +func flattenBrokerAuthorizationBrokerResources(resources []brokerauthorization.BrokerResourceRule) []BrokerAuthorizationBrokerResourceModel { + result := make([]BrokerAuthorizationBrokerResourceModel, 0, len(resources)) + + for _, resource := range resources { + brokerResource := BrokerAuthorizationBrokerResourceModel{ + Method: string(resource.Method), + } + + if resource.ClientIds != nil { + brokerResource.Clients = *resource.ClientIds + } + + if resource.Topics != nil { + brokerResource.Topics = *resource.Topics + } + + result = append(result, brokerResource) + } + + return result +} + +func flattenBrokerAuthorizationPrincipals(principal brokerauthorization.PrincipalDefinition) BrokerAuthorizationPrincipalModel { + result := BrokerAuthorizationPrincipalModel{} + + if principal.ClientIds != nil { + result.Clients = *principal.ClientIds + } + + if principal.Usernames != nil { + result.Usernames = *principal.Usernames + } + + if principal.Attributes != nil { + result.Attributes = *principal.Attributes + } + + return result +} + +func flattenBrokerAuthorizationStateStoreResources(resources []brokerauthorization.StateStoreResourceRule) []BrokerAuthorizationStateStoreResourceModel { + result := make([]BrokerAuthorizationStateStoreResourceModel, 0, len(resources)) + + for _, resource := range resources { + stateStoreResource := BrokerAuthorizationStateStoreResourceModel{ + KeyType: string(resource.KeyType), + Keys: resource.Keys, + Method: string(resource.Method), + } + + result = append(result, stateStoreResource) + } + + return result +} diff --git a/internal/services/iotoperations/iotoperations_brokerautorization_resource_test.go b/internal/services/iotoperations/iotoperations_brokerautorization_resource_test.go new file mode 100644 index 
000000000000..038d0c1d49d8 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_brokerautorization_resource_test.go @@ -0,0 +1,296 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +func TestAccIotOperationsBrokerAuthorization_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authorization", "test") + r := BrokerAuthorizationResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBrokerAuthorization_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authorization", "test") + r := BrokerAuthorizationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_iotoperations_broker_authorization"), + }, + }) +} + +func TestAccIotOperationsBrokerAuthorization_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authorization", "test") + r := BrokerAuthorizationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsBrokerAuthorization_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_broker_authorization", "test") + r := BrokerAuthorizationResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +type BrokerAuthorizationResource struct{} + +func (BrokerAuthorizationResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := brokerauthorization.ParseAuthorizationID(state.ID) + if err != nil { + return nil, fmt.Errorf("parsing %s: %+v", state.ID, err) + } + + resp, err := clients.IoTOperations.BrokerAuthorizationClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %+v", state.ID, err) + } + + return utils.Bool(resp.Model != nil), nil +} + +func (r BrokerAuthorizationResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_iotoperations_instance" "test" { + name = "acctestinstance%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + extended_location { + name = "/subscriptions/%s/resourceGroups/acctestRG-%d/providers/Microsoft.ExtendedLocation/customLocations/location1" + type = "CustomLocation" + } +} + +resource 
"azurerm_iotoperations_broker" "test" { + name = "acctestbroker%d" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + + properties { + memory_profile = "Tiny" + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} + +resource "azurerm_iotoperations_broker_authorization" "test" { + name = "acctestauth%d" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + broker_name = azurerm_iotoperations_broker.test.name + + authorization_policies { + cache = "Enabled" + rules { + broker_resources { + method = "Connect" + clients = ["test-client"] + topics = ["test-topic"] + } + principals { + clients = ["test-client"] + usernames = ["test-user"] + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Client().SubscriptionID, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + +func (r BrokerAuthorizationResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_broker_authorization" "import" { + name = azurerm_iotoperations_broker_authorization.test.name + resource_group_name = azurerm_iotoperations_broker_authorization.test.resource_group_name + instance_name = azurerm_iotoperations_broker_authorization.test.instance_name + broker_name = azurerm_iotoperations_broker_authorization.test.broker_name + + authorization_policies { + cache = "Enabled" + rules { + broker_resources { + method = "Connect" + clients = ["test-client"] + topics = ["test-topic"] + } + principals { + clients = ["test-client"] + usernames = ["test-user"] + } + } + } + + extended_location 
{ + name = azurerm_iotoperations_broker_authorization.test.extended_location[0].name + type = azurerm_iotoperations_broker_authorization.test.extended_location[0].type + } +} +`, r.basic(data)) +} + +func (r BrokerAuthorizationResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_iotoperations_instance" "test" { + name = "acctestinstance%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + extended_location { + name = "/subscriptions/%s/resourceGroups/acctestRG-%d/providers/Microsoft.ExtendedLocation/customLocations/location1" + type = "CustomLocation" + } +} + +resource "azurerm_iotoperations_broker" "test" { + name = "acctestbroker%d" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + + properties { + memory_profile = "Medium" + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} + +resource "azurerm_iotoperations_broker_authorization" "test" { + name = "acctestauth%d" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + broker_name = azurerm_iotoperations_broker.test.name + + tags = { + ENV = "Test" + } + + authorization_policies { + cache = "Enabled" + rules { + broker_resources { + method = "Connect" + clients = ["test-client-%d", "admin-client-%d"] + topics = ["sensor/temperature", "device/status"] + } + principals { + clients = ["test-client-%d", "admin-client-%d"] + usernames = ["test-user", "admin-user"] + attributes = { + "group" = "sensors" + "role" = "publisher" + } + } + } + rules { + broker_resources { + method = "Publish" + clients = ["publisher-client-%d"] + topics = 
["data/telemetry"] + } + principals { + clients = ["publisher-client-%d"] + usernames = ["publisher-user"] + attributes = { + "department" = "iot" + } + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.Client().SubscriptionID, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/internal/services/iotoperations/iotoperations_dataflow_resource.go b/internal/services/iotoperations/iotoperations_dataflow_resource.go new file mode 100644 index 000000000000..c53d5afed497 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_dataflow_resource.go @@ -0,0 +1,900 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type DataflowResource struct{} + +var _ sdk.ResourceWithUpdate = DataflowResource{} + +type DataflowModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + InstanceName string `tfschema:"instance_name"` + DataflowProfileName string `tfschema:"dataflow_profile_name"` + Mode *string `tfschema:"mode"` + Operations []DataflowOperationModel `tfschema:"operations"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type DataflowOperationModel struct { + Name *string `tfschema:"name"` + 
OperationType string `tfschema:"operation_type"` + SourceSettings *DataflowSourceOperationSettingsModel `tfschema:"source_settings"` + DestinationSettings *DataflowDestinationOperationSettingsModel `tfschema:"destination_settings"` + BuiltInTransformationSettings *DataflowBuiltInTransformationSettingsModel `tfschema:"built_in_transformation_settings"` +} + +type DataflowSourceOperationSettingsModel struct { + DataSources []string `tfschema:"data_sources"` + AssetRef *string `tfschema:"asset_ref"` + EndpointRef string `tfschema:"endpoint_ref"` + SchemaRef *string `tfschema:"schema_ref"` + SerializationFormat *string `tfschema:"serialization_format"` +} + +type DataflowDestinationOperationSettingsModel struct { + DataDestination string `tfschema:"data_destination"` + EndpointRef string `tfschema:"endpoint_ref"` +} + +type DataflowBuiltInTransformationSettingsModel struct { + Datasets []DataflowBuiltInTransformationDatasetModel `tfschema:"datasets"` + Filter []DataflowBuiltInTransformationFilterModel `tfschema:"filter"` + Map []DataflowBuiltInTransformationMapModel `tfschema:"map"` + SchemaRef *string `tfschema:"schema_ref"` + SerializationFormat *string `tfschema:"serialization_format"` +} + +type DataflowBuiltInTransformationFilterModel struct { + Description *string `tfschema:"description"` + Expression string `tfschema:"expression"` + Inputs []string `tfschema:"inputs"` + Type *string `tfschema:"type"` +} + +type DataflowBuiltInTransformationMapModel struct { + Description *string `tfschema:"description"` + Expression *string `tfschema:"expression"` + Inputs []string `tfschema:"inputs"` + Output string `tfschema:"output"` + Type *string `tfschema:"type"` +} + +type DataflowBuiltInTransformationDatasetModel struct { + Key string `tfschema:"key"` + Description *string `tfschema:"description"` + Expression *string `tfschema:"expression"` + Inputs []string `tfschema:"inputs"` + SchemaRef *string `tfschema:"schema_ref"` +} + +func (r DataflowResource) ModelObject() 
interface{} { + return &DataflowModel{} +} + +func (r DataflowResource) ResourceType() string { + return "azurerm_iotoperations_dataflow" +} + +func (r DataflowResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return dataflow.ValidateDataflowID +} + +func (r DataflowResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "dataflow_profile_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "mode": { + Type: pluginsdk.TypeString, + Optional: true, + Default: "Enabled", + ValidateFunc: validation.StringInSlice([]string{ + "Enabled", + "Disabled", + }, false), + }, + "operations": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 63), + }, + "operation_type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Source", + "Destination", + 
"BuiltInTransformation", + }, false), + }, + "source_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "data_sources": { + Type: pluginsdk.TypeList, + Required: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + "asset_ref": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "endpoint_ref": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "schema_ref": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "serialization_format": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Json", + }, false), + }, + }, + }, + }, + "destination_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "data_destination": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "endpoint_ref": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "built_in_transformation_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "datasets": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "key": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, + "expression": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: 
validation.StringLenBetween(1, 1000), + }, + "inputs": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + "schema_ref": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "filter": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, + "expression": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "inputs": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + "type": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Filter", + }, false), + }, + }, + }, + }, + "map": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "description": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, + "expression": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "inputs": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + "output": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "type": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "BuiltInFunction", + "Compute", + "NewProperties", + "PassThrough", + "Rename", + }, false), + }, + }, + }, + }, + "schema_ref": { + 
Type: pluginsdk.TypeString,
+							Optional:     true,
+							ValidateFunc: validation.StringLenBetween(1, 253),
+						},
+						"serialization_format": {
+							Type:     pluginsdk.TypeString,
+							Optional: true,
+							ValidateFunc: validation.StringInSlice([]string{
+								"Delta",
+								"Json",
+								"Parquet",
+							}, false),
+						},
+					},
+				},
+			},
+		},
+		"extended_location": {
+			Type:     pluginsdk.TypeList,
+			Required: true,
+			ForceNew: true,
+			MaxItems: 1,
+			Elem: &pluginsdk.Resource{
+				Schema: map[string]*pluginsdk.Schema{
+					"name": {
+						Type:         pluginsdk.TypeString,
+						Required:     true,
+						ValidateFunc: validation.StringIsNotEmpty,
+					},
+					"type": {
+						Type:     pluginsdk.TypeString,
+						Required: true,
+						ValidateFunc: validation.StringInSlice([]string{
+							"CustomLocation",
+						}, false),
+					},
+				},
+			},
+		},
+	}
+}
+
+// Attributes lists the read-only fields exposed on this resource.
+func (r DataflowResource) Attributes() map[string]*pluginsdk.Schema {
+	return map[string]*pluginsdk.Schema{
+		"provisioning_state": {
+			Type:     pluginsdk.TypeString,
+			Computed: true,
+		},
+	}
+}
+
+// Create provisions a new IoT Operations dataflow via CreateOrUpdate and polls
+// until the operation completes.
+func (r DataflowResource) Create() sdk.ResourceFunc {
+	return sdk.ResourceFunc{
+		Timeout: 30 * time.Minute,
+		Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+			client := metadata.Client.IoTOperations.DataflowClient
+
+			var model DataflowModel
+			if err := metadata.Decode(&model); err != nil {
+				return fmt.Errorf("decoding: %+v", err)
+			}
+
+			subscriptionId := metadata.Client.Account.SubscriptionId
+			id := dataflow.NewDataflowID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.DataflowProfileName, model.Name)
+
+			// Build the payload. The extended-location type defaults to
+			// CustomLocation when the model carries no explicit value.
+			// NOTE(review): the schema above declares a nested "extended_location"
+			// block, but the model reads flat extended_location_name /
+			// extended_location_type fields whose tfschema tags have no matching
+			// schema keys — confirm Decode actually populates them.
+			extendedLocationName := ""
+			if model.ExtendedLocationName != nil {
+				extendedLocationName = *model.ExtendedLocationName
+			}
+			extendedLocationType := dataflow.ExtendedLocationTypeCustomLocation
+			if model.ExtendedLocationType != nil {
+				extendedLocationType = dataflow.ExtendedLocationType(*model.ExtendedLocationType)
+			}
+			payload := dataflow.DataflowResource{
+				ExtendedLocation: dataflow.ExtendedLocation{
+					Name: extendedLocationName,
+					Type: extendedLocationType,
+				},
+				Properties: expandDataflowProperties(model),
+			}
+
+			if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil {
+				return fmt.Errorf("creating %s: %+v", id, err)
+			}
+
+			metadata.SetID(id)
+			return nil
+		},
+	}
+}
+
+// Read fetches the dataflow and maps the remote state back onto the model.
+func (r DataflowResource) Read() sdk.ResourceFunc {
+	return sdk.ResourceFunc{
+		Timeout: 5 * time.Minute,
+		Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+			client := metadata.Client.IoTOperations.DataflowClient
+
+			id, err := dataflow.ParseDataflowID(metadata.ResourceData.Id())
+			if err != nil {
+				return err
+			}
+
+			resp, err := client.Get(ctx, *id)
+			if err != nil {
+				return fmt.Errorf("reading %s: %+v", *id, err)
+			}
+
+			model := DataflowModel{
+				Name:                id.DataflowName,
+				ResourceGroupName:   id.ResourceGroupName,
+				InstanceName:        id.InstanceName,
+				DataflowProfileName: id.DataflowProfileName,
+			}
+
+			if respModel := resp.Model; respModel != nil {
+				model.ExtendedLocationName = &respModel.ExtendedLocation.Name
+				extendedLocationType := string(respModel.ExtendedLocation.Type)
+				model.ExtendedLocationType = &extendedLocationType
+
+				if respModel.Properties != nil {
+					flattenDataflowProperties(respModel.Properties, &model)
+
+					if respModel.Properties.ProvisioningState != nil {
+						provisioningState := string(*respModel.Properties.ProvisioningState)
+						model.ProvisioningState = &provisioningState
+					}
+				}
+			}
+
+			return metadata.Encode(&model)
+		},
+	}
+}
+
+// Update re-submits the full resource via CreateOrUpdate (the API exposes no
+// partial-update operation for dataflows).
+func (r DataflowResource) Update() sdk.ResourceFunc {
+	return sdk.ResourceFunc{
+		Timeout: 30 * time.Minute,
+		Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+			client := metadata.Client.IoTOperations.DataflowClient
+
+			id, err := 
dataflow.ParseDataflowID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var model DataflowModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // For dataflow, we use CreateOrUpdate for updates since there's no dedicated Update method + extendedLocationName := "" + if model.ExtendedLocationName != nil { + extendedLocationName = *model.ExtendedLocationName + } + extendedLocationType := dataflow.ExtendedLocationTypeCustomLocation + if model.ExtendedLocationType != nil { + extendedLocationType = dataflow.ExtendedLocationType(*model.ExtendedLocationType) + } + + payload := dataflow.DataflowResource{ + ExtendedLocation: dataflow.ExtendedLocation{ + Name: extendedLocationName, + Type: extendedLocationType, + }, + Properties: expandDataflowProperties(model), + } + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r DataflowResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowClient + + id, err := dataflow.ParseDataflowID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +// Helper functions for expand/flatten operations +// expandDataflowExtendedLocation and flattenDataflowExtendedLocation removed; now handled inline with separate fields + +func expandDataflowProperties(model DataflowModel) *dataflow.DataflowProperties { + props := &dataflow.DataflowProperties{ + Operations: expandDataflowOperations(model.Operations), + } + + if model.Mode != nil { + mode := dataflow.OperationalMode(*model.Mode) + props.Mode = &mode + } + + return props +} + +func 
expandDataflowOperations(operations []DataflowOperationModel) []dataflow.DataflowOperation { + result := make([]dataflow.DataflowOperation, 0, len(operations)) + + for _, op := range operations { + operation := dataflow.DataflowOperation{ + OperationType: dataflow.OperationType(op.OperationType), + } + + if op.Name != nil { + operation.Name = op.Name + } + + if op.SourceSettings != nil { + operation.SourceSettings = expandDataflowSourceOperationSettings(*op.SourceSettings) + } + + if op.DestinationSettings != nil { + operation.DestinationSettings = expandDataflowDestinationOperationSettings(*op.DestinationSettings) + } + + if op.BuiltInTransformationSettings != nil { + operation.BuiltInTransformationSettings = expandDataflowBuiltInTransformationSettings(*op.BuiltInTransformationSettings) + } + + result = append(result, operation) + } + + return result +} + +func expandDataflowSourceOperationSettings(source DataflowSourceOperationSettingsModel) *dataflow.DataflowSourceOperationSettings { + result := &dataflow.DataflowSourceOperationSettings{ + DataSources: source.DataSources, + EndpointRef: source.EndpointRef, + } + + if source.AssetRef != nil { + result.AssetRef = source.AssetRef + } + + if source.SchemaRef != nil { + result.SchemaRef = source.SchemaRef + } + + if source.SerializationFormat != nil { + format := dataflow.SourceSerializationFormat(*source.SerializationFormat) + result.SerializationFormat = &format + } + + return result +} + +func expandDataflowDestinationOperationSettings(destination DataflowDestinationOperationSettingsModel) *dataflow.DataflowDestinationOperationSettings { + return &dataflow.DataflowDestinationOperationSettings{ + DataDestination: destination.DataDestination, + EndpointRef: destination.EndpointRef, + } +} + +func expandDataflowBuiltInTransformationSettings(settings DataflowBuiltInTransformationSettingsModel) *dataflow.DataflowBuiltInTransformationSettings { + result := &dataflow.DataflowBuiltInTransformationSettings{} + + if 
len(settings.Datasets) > 0 { + result.Datasets = expandDataflowBuiltInTransformationDatasets(settings.Datasets) + } + + if len(settings.Filter) > 0 { + result.Filter = expandDataflowBuiltInTransformationFilters(settings.Filter) + } + + if len(settings.Map) > 0 { + result.Map = expandDataflowBuiltInTransformationMaps(settings.Map) + } + + if settings.SchemaRef != nil { + result.SchemaRef = settings.SchemaRef + } + + if settings.SerializationFormat != nil { + format := dataflow.TransformationSerializationFormat(*settings.SerializationFormat) + result.SerializationFormat = &format + } + + return result +} + +func expandDataflowBuiltInTransformationDatasets(datasets []DataflowBuiltInTransformationDatasetModel) *[]dataflow.DataflowBuiltInTransformationDataset { + result := make([]dataflow.DataflowBuiltInTransformationDataset, 0, len(datasets)) + + for _, dataset := range datasets { + datasetItem := dataflow.DataflowBuiltInTransformationDataset{ + Key: dataset.Key, + Inputs: dataset.Inputs, + } + + if dataset.Description != nil { + datasetItem.Description = dataset.Description + } + + if dataset.Expression != nil { + datasetItem.Expression = dataset.Expression + } + + if dataset.SchemaRef != nil { + datasetItem.SchemaRef = dataset.SchemaRef + } + + result = append(result, datasetItem) + } + + return &result +} + +func expandDataflowBuiltInTransformationFilters(filters []DataflowBuiltInTransformationFilterModel) *[]dataflow.DataflowBuiltInTransformationFilter { + result := make([]dataflow.DataflowBuiltInTransformationFilter, 0, len(filters)) + + for _, filter := range filters { + filterItem := dataflow.DataflowBuiltInTransformationFilter{ + Expression: filter.Expression, + Inputs: filter.Inputs, + } + + if filter.Description != nil { + filterItem.Description = filter.Description + } + + if filter.Type != nil { + filterType := dataflow.FilterType(*filter.Type) + filterItem.Type = &filterType + } + + result = append(result, filterItem) + } + + return &result +} + +func 
expandDataflowBuiltInTransformationMaps(maps []DataflowBuiltInTransformationMapModel) *[]dataflow.DataflowBuiltInTransformationMap { + result := make([]dataflow.DataflowBuiltInTransformationMap, 0, len(maps)) + + for _, mapItem := range maps { + mapTransform := dataflow.DataflowBuiltInTransformationMap{ + Inputs: mapItem.Inputs, + Output: mapItem.Output, + } + + if mapItem.Description != nil { + mapTransform.Description = mapItem.Description + } + + if mapItem.Expression != nil { + mapTransform.Expression = mapItem.Expression + } + + if mapItem.Type != nil { + mapType := dataflow.DataflowMappingType(*mapItem.Type) + mapTransform.Type = &mapType + } + + result = append(result, mapTransform) + } + + return &result +} + +func flattenDataflowProperties(props *dataflow.DataflowProperties, model *DataflowModel) { + if props == nil { + return + } + + if props.Mode != nil { + mode := string(*props.Mode) + model.Mode = &mode + } + + if len(props.Operations) > 0 { + model.Operations = flattenDataflowOperations(props.Operations) + } +} + +func flattenDataflowOperations(operations []dataflow.DataflowOperation) []DataflowOperationModel { + result := make([]DataflowOperationModel, 0, len(operations)) + + for _, op := range operations { + operation := DataflowOperationModel{ + OperationType: string(op.OperationType), + } + + if op.Name != nil { + operation.Name = op.Name + } + + if op.SourceSettings != nil { + operation.SourceSettings = flattenDataflowSourceOperationSettings(*op.SourceSettings) + } + + if op.DestinationSettings != nil { + operation.DestinationSettings = flattenDataflowDestinationOperationSettings(*op.DestinationSettings) + } + + if op.BuiltInTransformationSettings != nil { + operation.BuiltInTransformationSettings = flattenDataflowBuiltInTransformationSettings(*op.BuiltInTransformationSettings) + } + + result = append(result, operation) + } + + return result +} + +func flattenDataflowSourceOperationSettings(source dataflow.DataflowSourceOperationSettings) 
*DataflowSourceOperationSettingsModel { + result := &DataflowSourceOperationSettingsModel{ + DataSources: source.DataSources, + EndpointRef: source.EndpointRef, + } + + if source.AssetRef != nil { + result.AssetRef = source.AssetRef + } + + if source.SchemaRef != nil { + result.SchemaRef = source.SchemaRef + } + + if source.SerializationFormat != nil { + format := string(*source.SerializationFormat) + result.SerializationFormat = &format + } + + return result +} + +func flattenDataflowDestinationOperationSettings(destination dataflow.DataflowDestinationOperationSettings) *DataflowDestinationOperationSettingsModel { + return &DataflowDestinationOperationSettingsModel{ + DataDestination: destination.DataDestination, + EndpointRef: destination.EndpointRef, + } +} + +func flattenDataflowBuiltInTransformationSettings(settings dataflow.DataflowBuiltInTransformationSettings) *DataflowBuiltInTransformationSettingsModel { + result := &DataflowBuiltInTransformationSettingsModel{} + + if settings.Datasets != nil { + result.Datasets = flattenDataflowBuiltInTransformationDatasets(*settings.Datasets) + } + + if settings.Filter != nil { + result.Filter = flattenDataflowBuiltInTransformationFilters(*settings.Filter) + } + + if settings.Map != nil { + result.Map = flattenDataflowBuiltInTransformationMaps(*settings.Map) + } + + if settings.SchemaRef != nil { + result.SchemaRef = settings.SchemaRef + } + + if settings.SerializationFormat != nil { + format := string(*settings.SerializationFormat) + result.SerializationFormat = &format + } + + return result +} + +func flattenDataflowBuiltInTransformationDatasets(datasets []dataflow.DataflowBuiltInTransformationDataset) []DataflowBuiltInTransformationDatasetModel { + result := make([]DataflowBuiltInTransformationDatasetModel, 0, len(datasets)) + + for _, dataset := range datasets { + datasetModel := DataflowBuiltInTransformationDatasetModel{ + Key: dataset.Key, + Inputs: dataset.Inputs, + } + + if dataset.Description != nil { + 
datasetModel.Description = dataset.Description + } + + if dataset.Expression != nil { + datasetModel.Expression = dataset.Expression + } + + if dataset.SchemaRef != nil { + datasetModel.SchemaRef = dataset.SchemaRef + } + + result = append(result, datasetModel) + } + + return result +} + +func flattenDataflowBuiltInTransformationFilters(filters []dataflow.DataflowBuiltInTransformationFilter) []DataflowBuiltInTransformationFilterModel { + result := make([]DataflowBuiltInTransformationFilterModel, 0, len(filters)) + + for _, filter := range filters { + filterModel := DataflowBuiltInTransformationFilterModel{ + Expression: filter.Expression, + Inputs: filter.Inputs, + } + + if filter.Description != nil { + filterModel.Description = filter.Description + } + + if filter.Type != nil { + filterType := string(*filter.Type) + filterModel.Type = &filterType + } + + result = append(result, filterModel) + } + + return result +} + +func flattenDataflowBuiltInTransformationMaps(maps []dataflow.DataflowBuiltInTransformationMap) []DataflowBuiltInTransformationMapModel { + result := make([]DataflowBuiltInTransformationMapModel, 0, len(maps)) + + for _, mapItem := range maps { + mapModel := DataflowBuiltInTransformationMapModel{ + Inputs: mapItem.Inputs, + Output: mapItem.Output, + } + + if mapItem.Description != nil { + mapModel.Description = mapItem.Description + } + + if mapItem.Expression != nil { + mapModel.Expression = mapItem.Expression + } + + if mapItem.Type != nil { + mapType := string(*mapItem.Type) + mapModel.Type = &mapType + } + + result = append(result, mapModel) + } + + return result +} diff --git a/internal/services/iotoperations/iotoperations_dataflow_resource_test.go b/internal/services/iotoperations/iotoperations_dataflow_resource_test.go new file mode 100644 index 000000000000..92a80ee6e490 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_dataflow_resource_test.go @@ -0,0 +1,263 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +// IotOperationsDataflowResource is a test harness for azurerm_iotoperations_dataflow acceptance tests. +type IotOperationsDataflowResource struct{} + +func TestAccIotOperationsDataflow_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow", "test") + r := IotOperationsDataflowResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-dataflow-%s", data.RandomString)), + check.That(data.ResourceName).Key("properties.0.mode").HasValue("Enabled"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsDataflow_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow", "test") + r := IotOperationsDataflowResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccIotOperationsDataflow_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow", "test") + r := IotOperationsDataflowResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: 
acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-dataflow-%s", data.RandomString)), + check.That(data.ResourceName).Key("properties.0.mode").HasValue("Enabled"), + check.That(data.ResourceName).Key("properties.0.operations.#").HasValue("2"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsDataflow_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow", "test") + r := IotOperationsDataflowResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("properties.0.operations.#").HasValue("2"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func (r IotOperationsDataflowResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := dataflow.ParseDataflowID(state.ID) + if err != nil { + return nil, fmt.Errorf("parsing %s: %+v", state.ID, err) + } + + resp, err := clients.IoTOperations.DataflowClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %+v", state.ID, err) + } + + return utils.Bool(resp.Model != nil), nil +} + +func (r IotOperationsDataflowResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-iot-%d" + location = "%s" +} + +resource "azurerm_iotoperations_instance" "test" { + name = "acctest-instance-%d" + resource_group_name = azurerm_resource_group.test.name + 
location = azurerm_resource_group.test.location + + extended_location { + name = "acctest-custom-location-%s" + type = "CustomLocation" + } +} + +resource "azurerm_iotoperations_dataflow_profile" "test" { + name = "acctest-profile-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + location = azurerm_resource_group.test.location + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomString, data.RandomString) +} + +func (r IotOperationsDataflowResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow" "test" { + name = "acctest-dataflow-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.test.name + location = azurerm_resource_group.test.location + + properties { + mode = "Enabled" + request_disk_persistence = "Enabled" + + operations { + operation_type = "Source" + name = "temperature-source" + + source_settings { + endpoint_ref = "temperature-endpoint" + data_sources = ["temperature/*"] + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, r.template(data), data.RandomString) +} + +func (r IotOperationsDataflowResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow" "import" { + name = azurerm_iotoperations_dataflow.test.name + resource_group_name = azurerm_iotoperations_dataflow.test.resource_group_name + instance_name = azurerm_iotoperations_dataflow.test.instance_name + dataflow_profile_name = 
azurerm_iotoperations_dataflow.test.dataflow_profile_name + location = azurerm_iotoperations_dataflow.test.location + + properties { + mode = azurerm_iotoperations_dataflow.test.properties[0].mode + request_disk_persistence = azurerm_iotoperations_dataflow.test.properties[0].request_disk_persistence + + operations { + operation_type = azurerm_iotoperations_dataflow.test.properties[0].operations[0].operation_type + name = azurerm_iotoperations_dataflow.test.properties[0].operations[0].name + + source_settings { + endpoint_ref = azurerm_iotoperations_dataflow.test.properties[0].operations[0].source_settings[0].endpoint_ref + data_sources = azurerm_iotoperations_dataflow.test.properties[0].operations[0].source_settings[0].data_sources + } + } + } + + extended_location { + name = azurerm_iotoperations_dataflow.test.extended_location[0].name + type = azurerm_iotoperations_dataflow.test.extended_location[0].type + } +} +`, r.basic(data)) +} + +func (r IotOperationsDataflowResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow" "test" { + name = "acctest-dataflow-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.test.name + location = azurerm_resource_group.test.location + + properties { + mode = "Enabled" + request_disk_persistence = "Enabled" + + operations { + operation_type = "Source" + name = "temperature-source" + + source_settings { + endpoint_ref = "temperature-endpoint" + data_sources = ["temperature/*", "humidity/*"] + serialization_format = "Json" + } + } + + operations { + operation_type = "Destination" + name = "adx-destination" + + destination_settings { + endpoint_ref = "adx-endpoint" + data_destination = "telemetry-table" + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = 
azurerm_iotoperations_instance.test.extended_location[0].type + } + + tags = { + environment = "testing" + purpose = "dataflow-acceptance-test" + } +} +`, r.template(data), data.RandomString) +} diff --git a/internal/services/iotoperations/iotoperations_dataflowendpoint_resource.go b/internal/services/iotoperations/iotoperations_dataflowendpoint_resource.go new file mode 100644 index 000000000000..9873b88a3517 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_dataflowendpoint_resource.go @@ -0,0 +1,1575 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type DataflowEndpointResource struct{} + +var _ sdk.ResourceWithUpdate = DataflowEndpointResource{} + +type DataflowEndpointModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + InstanceName string `tfschema:"instance_name"` + EndpointType string `tfschema:"endpoint_type"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + DataExplorerSettings *DataflowEndpointDataExplorerModel `tfschema:"data_explorer_settings"` + DataLakeStorageSettings *DataflowEndpointDataLakeStorageModel `tfschema:"data_lake_storage_settings"` + FabricOneLakeSettings *DataflowEndpointFabricOneLakeModel `tfschema:"fabric_one_lake_settings"` + KafkaSettings *DataflowEndpointKafkaModel `tfschema:"kafka_settings"` + LocalStorageSettings *DataflowEndpointLocalStorageModel `tfschema:"local_storage_settings"` + MqttSettings *DataflowEndpointMqttModel `tfschema:"mqtt_settings"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type 
DataflowEndpointDataExplorerModel struct { + Authentication DataflowEndpointDataExplorerAuthenticationModel `tfschema:"authentication"` + Batching *BatchingConfigurationModel `tfschema:"batching"` + Database string `tfschema:"database"` + Host string `tfschema:"host"` +} + +type DataflowEndpointDataExplorerAuthenticationModel struct { + Method string `tfschema:"method"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `tfschema:"system_assigned_managed_identity_settings"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `tfschema:"user_assigned_managed_identity_settings"` +} + +type DataflowEndpointDataLakeStorageModel struct { + Authentication DataflowEndpointDataLakeStorageAuthenticationModel `tfschema:"authentication"` + Batching *BatchingConfigurationModel `tfschema:"batching"` + Host string `tfschema:"host"` +} + +type DataflowEndpointDataLakeStorageAuthenticationModel struct { + Method string `tfschema:"method"` + AccessTokenSettings *DataflowEndpointAuthenticationAccessTokenModel `tfschema:"access_token_settings"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `tfschema:"system_assigned_managed_identity_settings"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `tfschema:"user_assigned_managed_identity_settings"` +} + +type DataflowEndpointFabricOneLakeModel struct { + Authentication DataflowEndpointFabricOneLakeAuthenticationModel `tfschema:"authentication"` + Batching *BatchingConfigurationModel `tfschema:"batching"` + Host string `tfschema:"host"` + Names DataflowEndpointFabricOneLakeNamesModel `tfschema:"names"` + OneLakePathType string `tfschema:"one_lake_path_type"` +} + +type DataflowEndpointFabricOneLakeAuthenticationModel struct { + Method string `tfschema:"method"` + SystemAssignedManagedIdentitySettings 
*DataflowEndpointAuthenticationSystemAssignedManagedIdentity `tfschema:"system_assigned_managed_identity_settings"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `tfschema:"user_assigned_managed_identity_settings"` +} + +type DataflowEndpointFabricOneLakeNamesModel struct { + LakehouseName string `tfschema:"lakehouse_name"` + WorkspaceName string `tfschema:"workspace_name"` +} + +type DataflowEndpointKafkaModel struct { + Authentication DataflowEndpointKafkaAuthenticationModel `tfschema:"authentication"` + Batching *DataflowEndpointKafkaBatchingModel `tfschema:"batching"` + CloudEventAttributes *string `tfschema:"cloud_event_attributes"` + Compression *string `tfschema:"compression"` + ConsumerGroupId *string `tfschema:"consumer_group_id"` + CopyMqttProperties *string `tfschema:"copy_mqtt_properties"` + Host string `tfschema:"host"` + KafkaAcks *string `tfschema:"kafka_acks"` + PartitionStrategy *string `tfschema:"partition_strategy"` + Tls *TlsPropertiesModel `tfschema:"tls"` +} + +type DataflowEndpointKafkaAuthenticationModel struct { + Method string `tfschema:"method"` + SaslSettings *DataflowEndpointAuthenticationSaslModel `tfschema:"sasl_settings"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `tfschema:"system_assigned_managed_identity_settings"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `tfschema:"user_assigned_managed_identity_settings"` + X509CertificateSettings *DataflowEndpointAuthenticationX509Model `tfschema:"x509_certificate_settings"` +} + +type DataflowEndpointKafkaBatchingModel struct { + LatencyMs *int64 `tfschema:"latency_ms"` + MaxBytes *int64 `tfschema:"max_bytes"` + MaxMessages *int64 `tfschema:"max_messages"` + Mode *string `tfschema:"mode"` +} + +type DataflowEndpointLocalStorageModel struct { + PersistentVolumeClaimRef string `tfschema:"persistent_volume_claim_ref"` +} + +type 
DataflowEndpointMqttModel struct { + Authentication DataflowEndpointMqttAuthenticationModel `tfschema:"authentication"` + ClientIdPrefix *string `tfschema:"client_id_prefix"` + CloudEventAttributes *string `tfschema:"cloud_event_attributes"` + Host *string `tfschema:"host"` + KeepAliveSeconds *int64 `tfschema:"keep_alive_seconds"` + MaxInflightMessages *int64 `tfschema:"max_inflight_messages"` + Protocol *string `tfschema:"protocol"` + Qos *int64 `tfschema:"qos"` + Retain *string `tfschema:"retain"` + SessionExpirySeconds *int64 `tfschema:"session_expiry_seconds"` + Tls *TlsPropertiesModel `tfschema:"tls"` +} + +type DataflowEndpointMqttAuthenticationModel struct { + Method string `tfschema:"method"` + ServiceAccountTokenSettings *DataflowEndpointAuthenticationServiceAccountTokenModel `tfschema:"service_account_token_settings"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `tfschema:"system_assigned_managed_identity_settings"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `tfschema:"user_assigned_managed_identity_settings"` + X509CertificateSettings *DataflowEndpointAuthenticationX509Model `tfschema:"x509_certificate_settings"` +} + +// Common authentication models +type DataflowEndpointAuthenticationAccessTokenModel struct { + SecretRef string `tfschema:"secret_ref"` +} + +type DataflowEndpointAuthenticationSaslModel struct { + SaslType string `tfschema:"sasl_type"` + SecretRef string `tfschema:"secret_ref"` +} + +type DataflowEndpointAuthenticationServiceAccountTokenModel struct { + Audience string `tfschema:"audience"` +} + +type DataflowEndpointAuthenticationSystemAssignedManagedIdentity struct { + Audience *string `tfschema:"audience"` +} + +type DataflowEndpointAuthenticationUserAssignedManagedIdentity struct { + ClientId string `tfschema:"client_id"` + Scope *string `tfschema:"scope"` + TenantId string `tfschema:"tenant_id"` +} + +type 
DataflowEndpointAuthenticationX509Model struct { + SecretRef string `tfschema:"secret_ref"` +} + +type BatchingConfigurationModel struct { + LatencySeconds *int64 `tfschema:"latency_seconds"` + MaxMessages *int64 `tfschema:"max_messages"` +} + +type TlsPropertiesModel struct { + Mode *string `tfschema:"mode"` + TrustedCaCertificateConfigMapRef *string `tfschema:"trusted_ca_certificate_config_map_ref"` +} + +func (r DataflowEndpointResource) ModelObject() interface{} { + return &DataflowEndpointModel{} +} + +func (r DataflowEndpointResource) ResourceType() string { + return "azurerm_iotoperations_dataflow_endpoint" +} + +func (r DataflowEndpointResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return dataflowendpoint.ValidateDataflowEndpointID +} + +func (r DataflowEndpointResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "endpoint_type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "DataExplorer", + "DataLakeStorage", + "FabricOneLake", + "Kafka", + "LocalStorage", + "Mqtt", + }, false), + }, + "extended_location": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "CustomLocation", + }, false), + }, + }, + }, + }, + "data_explorer_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_lake_storage_settings", "fabric_one_lake_settings", "kafka_settings", "local_storage_settings", "mqtt_settings"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "database": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "host": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "batching": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: dataflowEndpointBatchingSchema(), + }, + "authentication": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: dataflowEndpointAuthenticationSchema(), + }, + }, + }, + }, + "data_lake_storage_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_explorer_settings", "fabric_one_lake_settings", "kafka_settings", "local_storage_settings", "mqtt_settings"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "host": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "batching": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: dataflowEndpointBatchingSchema(), + }, + "authentication": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: dataflowEndpointAuthenticationSchema(), + }, + }, + }, + }, + "fabric_one_lake_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_explorer_settings", "data_lake_storage_settings", 
"kafka_settings", "local_storage_settings", "mqtt_settings"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "host": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "names": { + Type: pluginsdk.TypeList, + Required: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + "one_lake_path_type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Files", + "Tables", + }, false), + }, + "workspace": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "batching": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: dataflowEndpointBatchingSchema(), + }, + "authentication": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: dataflowEndpointAuthenticationSchema(), + }, + }, + }, + }, + "kafka_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_explorer_settings", "data_lake_storage_settings", "fabric_one_lake_settings", "local_storage_settings", "mqtt_settings"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "host": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "batching": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: dataflowEndpointBatchingSchema(), + }, + "kafka": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "consumer_group_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "compression": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "None", + "Gzip", + "Snappy", + "Lz4", + }, false), + }, + 
"batching": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Enabled", + "Disabled", + }, false), + }, + "latency_ms": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600000), + }, + "max_bytes": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 1073741824), + }, + "max_messages": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 1000000), + }, + }, + }, + }, + }, + }, + }, + "authentication": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: dataflowEndpointAuthenticationSchema(), + }, + }, + }, + }, + "local_storage_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_explorer_settings", "data_lake_storage_settings", "fabric_one_lake_settings", "kafka_settings", "mqtt_settings"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "path": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + }, + }, + }, + "mqtt_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"data_explorer_settings", "data_lake_storage_settings", "fabric_one_lake_settings", "kafka_settings", "local_storage_settings"}, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "host": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "keep_alive_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 65535), + }, + "retain": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Keep", + "Never", + }, false), + }, + 
"session_expiry_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 4294967295), + }, + "max_inflight_messages": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 65535), + }, + "qos": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2), + }, + "protocol": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Mqtt", + "WebSockets", + }, false), + }, + "client_id_prefix": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + "tls_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "mode": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Enabled", + "Disabled", + }, false), + }, + "trusted_ca_certificate_config_map_ref": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "authentication": { + Type: pluginsdk.TypeList, + Required: true, + MaxItems: 1, + Elem: dataflowEndpointAuthenticationSchema(), + }, + }, + }, + }, + "tags": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + } +} + +func (r DataflowEndpointResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + // NOTE: O+C Azure automatically assigns provisioning state during resource lifecycle + Computed: true, + }, + } +} + +func dataflowEndpointBatchingSchema() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "latency_seconds": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 3600), + }, + "max_messages": { + Type: 
pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 1000000), + }, + }, + } +} + +func dataflowEndpointAuthenticationSchema() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "method": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "SystemAssignedManagedIdentity", + "UserAssignedManagedIdentity", + "ServiceAccountToken", + "X509Certificate", + "AccessToken", + "Sasl", + }, false), + }, + "system_assigned_managed_identity_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "audience": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "user_assigned_managed_identity_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "client_id": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.IsUUID, + }, + "audience": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "service_account_token_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "audience": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "x509_certificate_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "secret_ref": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "access_token_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: 
map[string]*pluginsdk.Schema{ + "secret_ref": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + "sasl_settings": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "sasl_type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "Plain", + "ScramSha256", + "ScramSha512", + }, false), + }, + "secret_ref": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 253), + }, + }, + }, + }, + }, + } +} + +func (r DataflowEndpointResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowEndpointClient + + var model DataflowEndpointModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // Extract extended_location block from ResourceData and set model fields (value types) + if v, ok := metadata.ResourceData.GetOk("extended_location"); ok { + if list, ok := v.([]interface{}); ok && len(list) > 0 && list[0] != nil { + m := list[0].(map[string]interface{}) + if name, ok := m["name"].(string); ok && name != "" { + model.ExtendedLocationName = &name + } + if typ, ok := m["type"].(string); ok && typ != "" { + model.ExtendedLocationType = &typ + } + } + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := dataflowendpoint.NewDataflowEndpointID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.Name) + + // Build payload + var extendedLocation *dataflowendpoint.ExtendedLocation + if model.ExtendedLocationName != nil && model.ExtendedLocationType != nil { + extendedLocation = &dataflowendpoint.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: 
dataflowendpoint.ExtendedLocationType(*model.ExtendedLocationType), + } + } + var payload dataflowendpoint.DataflowEndpointResource + if extendedLocation != nil { + payload.ExtendedLocation = *extendedLocation + } + payload.Properties = expandDataflowEndpointProperties(model) + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r DataflowEndpointResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowEndpointClient + + id, err := dataflowendpoint.ParseDataflowEndpointID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := DataflowEndpointModel{ + Name: id.DataflowEndpointName, + ResourceGroupName: id.ResourceGroupName, + InstanceName: id.InstanceName, + } + + if respModel := resp.Model; respModel != nil { + model.ExtendedLocationName = &respModel.ExtendedLocation.Name + extendedLocationType := string(respModel.ExtendedLocation.Type) + model.ExtendedLocationType = &extendedLocationType + + if respModel.Properties != nil { + flattenDataflowEndpointProperties(respModel.Properties, &model) + + if respModel.Properties.ProvisioningState != nil { + provisioningState := string(*respModel.Properties.ProvisioningState) + model.ProvisioningState = &provisioningState + } + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r DataflowEndpointResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowEndpointClient + + id, err := dataflowendpoint.ParseDataflowEndpointID(metadata.ResourceData.Id()) 
+ if err != nil { + return err + } + + var model DataflowEndpointModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // Extract extended_location block from ResourceData and set model fields (value types) + if v, ok := metadata.ResourceData.GetOk("extended_location"); ok { + if list, ok := v.([]interface{}); ok && len(list) > 0 && list[0] != nil { + m := list[0].(map[string]interface{}) + if name, ok := m["name"].(string); ok && name != "" { + model.ExtendedLocationName = &name + } + if typ, ok := m["type"].(string); ok && typ != "" { + model.ExtendedLocationType = &typ + } + } + } + + // For dataflow endpoint, we use CreateOrUpdate for updates since there's no dedicated Update method + var extendedLocation *dataflowendpoint.ExtendedLocation + if model.ExtendedLocationName != nil && model.ExtendedLocationType != nil { + extendedLocation = &dataflowendpoint.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: dataflowendpoint.ExtendedLocationType(*model.ExtendedLocationType), + } + } + var payload dataflowendpoint.DataflowEndpointResource + if extendedLocation != nil { + payload.ExtendedLocation = *extendedLocation + } + payload.Properties = expandDataflowEndpointProperties(model) + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r DataflowEndpointResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowEndpointClient + + id, err := dataflowendpoint.ParseDataflowEndpointID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +func expandDataflowEndpointProperties(model 
DataflowEndpointModel) *dataflowendpoint.DataflowEndpointProperties { + props := &dataflowendpoint.DataflowEndpointProperties{ + EndpointType: dataflowendpoint.EndpointType(model.EndpointType), + } + + if model.DataExplorerSettings != nil { + props.DataExplorerSettings = expandDataflowEndpointDataExplorer(*model.DataExplorerSettings) + } + + if model.DataLakeStorageSettings != nil { + props.DataLakeStorageSettings = expandDataflowEndpointDataLakeStorage(*model.DataLakeStorageSettings) + } + + if model.FabricOneLakeSettings != nil { + props.FabricOneLakeSettings = expandDataflowEndpointFabricOneLake(*model.FabricOneLakeSettings) + } + + if model.KafkaSettings != nil { + props.KafkaSettings = expandDataflowEndpointKafka(*model.KafkaSettings) + } + + if model.LocalStorageSettings != nil { + props.LocalStorageSettings = expandDataflowEndpointLocalStorage(*model.LocalStorageSettings) + } + + if model.MqttSettings != nil { + props.MqttSettings = expandDataflowEndpointMqtt(*model.MqttSettings) + } + + return props +} + +func expandDataflowEndpointKafka(kafka DataflowEndpointKafkaModel) *dataflowendpoint.DataflowEndpointKafka { + result := &dataflowendpoint.DataflowEndpointKafka{ + Host: kafka.Host, + Authentication: expandDataflowEndpointKafkaAuthentication(kafka.Authentication), + } + + if kafka.Batching != nil { + result.Batching = expandDataflowEndpointKafkaBatching(*kafka.Batching) + } + + if kafka.CloudEventAttributes != nil { + cloudEventType := dataflowendpoint.CloudEventAttributeType(*kafka.CloudEventAttributes) + result.CloudEventAttributes = &cloudEventType + } + + if kafka.Compression != nil { + compression := dataflowendpoint.DataflowEndpointKafkaCompression(*kafka.Compression) + result.Compression = &compression + } + + if kafka.ConsumerGroupId != nil { + result.ConsumerGroupId = kafka.ConsumerGroupId + } + + if kafka.CopyMqttProperties != nil { + copyMqtt := dataflowendpoint.OperationalMode(*kafka.CopyMqttProperties) + result.CopyMqttProperties = ©Mqtt + } + 
+ if kafka.KafkaAcks != nil { + acks := dataflowendpoint.DataflowEndpointKafkaAcks(*kafka.KafkaAcks) + result.KafkaAcks = &acks + } + + if kafka.PartitionStrategy != nil { + strategy := dataflowendpoint.DataflowEndpointKafkaPartitionStrategy(*kafka.PartitionStrategy) + result.PartitionStrategy = &strategy + } + + if kafka.Tls != nil { + result.Tls = expandTlsProperties(*kafka.Tls) + } + + return result +} + +func expandDataflowEndpointKafkaAuthentication(auth DataflowEndpointKafkaAuthenticationModel) dataflowendpoint.DataflowEndpointKafkaAuthentication { + result := dataflowendpoint.DataflowEndpointKafkaAuthentication{ + Method: dataflowendpoint.KafkaAuthMethod(auth.Method), + } + + if auth.SaslSettings != nil { + result.SaslSettings = &dataflowendpoint.DataflowEndpointAuthenticationSasl{ + SaslType: dataflowendpoint.DataflowEndpointAuthenticationSaslType(auth.SaslSettings.SaslType), + SecretRef: auth.SaslSettings.SecretRef, + } + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + if auth.X509CertificateSettings != nil { + result.X509CertificateSettings = &dataflowendpoint.DataflowEndpointAuthenticationX509{ + SecretRef: auth.X509CertificateSettings.SecretRef, + } + } + + return result +} + +func expandDataflowEndpointKafkaBatching(batching DataflowEndpointKafkaBatchingModel) *dataflowendpoint.DataflowEndpointKafkaBatching { + result := &dataflowendpoint.DataflowEndpointKafkaBatching{} + + 
if batching.LatencyMs != nil { + result.LatencyMs = batching.LatencyMs + } + + if batching.MaxBytes != nil { + result.MaxBytes = batching.MaxBytes + } + + if batching.MaxMessages != nil { + result.MaxMessages = batching.MaxMessages + } + + if batching.Mode != nil { + mode := dataflowendpoint.OperationalMode(*batching.Mode) + result.Mode = &mode + } + + return result +} + +func expandTlsProperties(tls TlsPropertiesModel) *dataflowendpoint.TlsProperties { + result := &dataflowendpoint.TlsProperties{} + + if tls.Mode != nil { + mode := dataflowendpoint.OperationalMode(*tls.Mode) + result.Mode = &mode + } + + if tls.TrustedCaCertificateConfigMapRef != nil { + result.TrustedCaCertificateConfigMapRef = tls.TrustedCaCertificateConfigMapRef + } + + return result +} + +// Additional expand functions for other endpoint types would go here... +func expandDataflowEndpointDataExplorer(dataExplorer DataflowEndpointDataExplorerModel) *dataflowendpoint.DataflowEndpointDataExplorer { + result := &dataflowendpoint.DataflowEndpointDataExplorer{ + Database: dataExplorer.Database, + Host: dataExplorer.Host, + Authentication: expandDataflowEndpointDataExplorerAuthentication(dataExplorer.Authentication), + } + + if dataExplorer.Batching != nil { + result.Batching = &dataflowendpoint.BatchingConfiguration{ + LatencySeconds: dataExplorer.Batching.LatencySeconds, + MaxMessages: dataExplorer.Batching.MaxMessages, + } + } + + return result +} + +func expandDataflowEndpointDataExplorerAuthentication(auth DataflowEndpointDataExplorerAuthenticationModel) dataflowendpoint.DataflowEndpointDataExplorerAuthentication { + result := dataflowendpoint.DataflowEndpointDataExplorerAuthentication{ + Method: dataflowendpoint.ManagedIdentityMethod(auth.Method), + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: 
auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + return result +} + +func expandDataflowEndpointDataLakeStorage(dataLakeStorage DataflowEndpointDataLakeStorageModel) *dataflowendpoint.DataflowEndpointDataLakeStorage { + result := &dataflowendpoint.DataflowEndpointDataLakeStorage{ + Host: dataLakeStorage.Host, + Authentication: expandDataflowEndpointDataLakeStorageAuthentication(dataLakeStorage.Authentication), + } + + if dataLakeStorage.Batching != nil { + result.Batching = &dataflowendpoint.BatchingConfiguration{ + LatencySeconds: dataLakeStorage.Batching.LatencySeconds, + MaxMessages: dataLakeStorage.Batching.MaxMessages, + } + } + + return result +} + +func expandDataflowEndpointDataLakeStorageAuthentication(auth DataflowEndpointDataLakeStorageAuthenticationModel) dataflowendpoint.DataflowEndpointDataLakeStorageAuthentication { + result := dataflowendpoint.DataflowEndpointDataLakeStorageAuthentication{ + Method: dataflowendpoint.DataLakeStorageAuthMethod(auth.Method), + } + + if auth.AccessTokenSettings != nil { + result.AccessTokenSettings = &dataflowendpoint.DataflowEndpointAuthenticationAccessToken{ + SecretRef: auth.AccessTokenSettings.SecretRef, + } + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = 
&dataflowendpoint.DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + return result +} + +func expandDataflowEndpointFabricOneLake(fabricOneLake DataflowEndpointFabricOneLakeModel) *dataflowendpoint.DataflowEndpointFabricOneLake { + result := &dataflowendpoint.DataflowEndpointFabricOneLake{ + Host: fabricOneLake.Host, + OneLakePathType: dataflowendpoint.DataflowEndpointFabricPathType(fabricOneLake.OneLakePathType), + Authentication: expandDataflowEndpointFabricOneLakeAuthentication(fabricOneLake.Authentication), + Names: dataflowendpoint.DataflowEndpointFabricOneLakeNames{ + LakehouseName: fabricOneLake.Names.LakehouseName, + WorkspaceName: fabricOneLake.Names.WorkspaceName, + }, + } + + if fabricOneLake.Batching != nil { + result.Batching = &dataflowendpoint.BatchingConfiguration{ + LatencySeconds: fabricOneLake.Batching.LatencySeconds, + MaxMessages: fabricOneLake.Batching.MaxMessages, + } + } + + return result +} + +func expandDataflowEndpointFabricOneLakeAuthentication(auth DataflowEndpointFabricOneLakeAuthenticationModel) dataflowendpoint.DataflowEndpointFabricOneLakeAuthentication { + result := dataflowendpoint.DataflowEndpointFabricOneLakeAuthentication{ + Method: dataflowendpoint.ManagedIdentityMethod(auth.Method), + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: 
auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + return result +} + +func expandDataflowEndpointLocalStorage(localStorage DataflowEndpointLocalStorageModel) *dataflowendpoint.DataflowEndpointLocalStorage { + return &dataflowendpoint.DataflowEndpointLocalStorage{ + PersistentVolumeClaimRef: localStorage.PersistentVolumeClaimRef, + } +} + +func expandDataflowEndpointMqtt(mqtt DataflowEndpointMqttModel) *dataflowendpoint.DataflowEndpointMqtt { + result := &dataflowendpoint.DataflowEndpointMqtt{ + Authentication: expandDataflowEndpointMqttAuthentication(mqtt.Authentication), + } + + if mqtt.ClientIdPrefix != nil { + result.ClientIdPrefix = mqtt.ClientIdPrefix + } + + if mqtt.CloudEventAttributes != nil { + cloudEventType := dataflowendpoint.CloudEventAttributeType(*mqtt.CloudEventAttributes) + result.CloudEventAttributes = &cloudEventType + } + + if mqtt.Host != nil { + result.Host = mqtt.Host + } + + if mqtt.KeepAliveSeconds != nil { + result.KeepAliveSeconds = mqtt.KeepAliveSeconds + } + + if mqtt.MaxInflightMessages != nil { + result.MaxInflightMessages = mqtt.MaxInflightMessages + } + + if mqtt.Protocol != nil { + protocol := dataflowendpoint.BrokerProtocolType(*mqtt.Protocol) + result.Protocol = &protocol + } + + if mqtt.Qos != nil { + result.Qos = mqtt.Qos + } + + if mqtt.Retain != nil { + retain := dataflowendpoint.MqttRetainType(*mqtt.Retain) + result.Retain = &retain + } + + if mqtt.SessionExpirySeconds != nil { + result.SessionExpirySeconds = mqtt.SessionExpirySeconds + } + + if mqtt.Tls != nil { + result.Tls = expandTlsProperties(*mqtt.Tls) + } + + return result +} + +func expandDataflowEndpointMqttAuthentication(auth DataflowEndpointMqttAuthenticationModel) dataflowendpoint.DataflowEndpointMqttAuthentication { + result := dataflowendpoint.DataflowEndpointMqttAuthentication{ + Method: dataflowendpoint.MqttAuthMethod(auth.Method), + } + + if auth.ServiceAccountTokenSettings != nil { + 
result.ServiceAccountTokenSettings = &dataflowendpoint.DataflowEndpointAuthenticationServiceAccountToken{ + Audience: auth.ServiceAccountTokenSettings.Audience, + } + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &dataflowendpoint.DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + if auth.X509CertificateSettings != nil { + result.X509CertificateSettings = &dataflowendpoint.DataflowEndpointAuthenticationX509{ + SecretRef: auth.X509CertificateSettings.SecretRef, + } + } + + return result +} + +func flattenDataflowEndpointProperties(props *dataflowendpoint.DataflowEndpointProperties, model *DataflowEndpointModel) { + if props == nil { + return + } + + model.EndpointType = string(props.EndpointType) + + if props.DataExplorerSettings != nil { + model.DataExplorerSettings = flattenDataflowEndpointDataExplorer(*props.DataExplorerSettings) + } + + if props.DataLakeStorageSettings != nil { + model.DataLakeStorageSettings = flattenDataflowEndpointDataLakeStorage(*props.DataLakeStorageSettings) + } + + if props.FabricOneLakeSettings != nil { + model.FabricOneLakeSettings = flattenDataflowEndpointFabricOneLake(*props.FabricOneLakeSettings) + } + + if props.KafkaSettings != nil { + model.KafkaSettings = flattenDataflowEndpointKafka(*props.KafkaSettings) + } + + if props.LocalStorageSettings != nil { + model.LocalStorageSettings = flattenDataflowEndpointLocalStorage(*props.LocalStorageSettings) + } + + if props.MqttSettings != nil { + model.MqttSettings = 
flattenDataflowEndpointMqtt(*props.MqttSettings) + } +} + +// Flatten functions would follow similar patterns... +func flattenDataflowEndpointKafka(kafka dataflowendpoint.DataflowEndpointKafka) *DataflowEndpointKafkaModel { + result := &DataflowEndpointKafkaModel{ + Host: kafka.Host, + Authentication: flattenDataflowEndpointKafkaAuthentication(kafka.Authentication), + } + + if kafka.Batching != nil { + result.Batching = &DataflowEndpointKafkaBatchingModel{ + LatencyMs: kafka.Batching.LatencyMs, + MaxBytes: kafka.Batching.MaxBytes, + MaxMessages: kafka.Batching.MaxMessages, + } + if kafka.Batching.Mode != nil { + mode := string(*kafka.Batching.Mode) + result.Batching.Mode = &mode + } + } + + if kafka.CloudEventAttributes != nil { + cloudEvent := string(*kafka.CloudEventAttributes) + result.CloudEventAttributes = &cloudEvent + } + + if kafka.Compression != nil { + compression := string(*kafka.Compression) + result.Compression = &compression + } + + if kafka.ConsumerGroupId != nil { + result.ConsumerGroupId = kafka.ConsumerGroupId + } + + if kafka.CopyMqttProperties != nil { + copyMqtt := string(*kafka.CopyMqttProperties) + result.CopyMqttProperties = &copyMqtt + } + + if kafka.KafkaAcks != nil { + acks := string(*kafka.KafkaAcks) + result.KafkaAcks = &acks + } + + if kafka.PartitionStrategy != nil { + strategy := string(*kafka.PartitionStrategy) + result.PartitionStrategy = &strategy + } + + if kafka.Tls != nil { + result.Tls = flattenTlsProperties(*kafka.Tls) + } + + return result +} + +func flattenDataflowEndpointKafkaAuthentication(auth dataflowendpoint.DataflowEndpointKafkaAuthentication) DataflowEndpointKafkaAuthenticationModel { + result := DataflowEndpointKafkaAuthenticationModel{ + Method: string(auth.Method), + } + + if auth.SaslSettings != nil { + result.SaslSettings = &DataflowEndpointAuthenticationSaslModel{ + SaslType: string(auth.SaslSettings.SaslType), + SecretRef: auth.SaslSettings.SecretRef, + } + } + + if auth.SystemAssignedManagedIdentitySettings != 
nil { + result.SystemAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + if auth.X509CertificateSettings != nil { + result.X509CertificateSettings = &DataflowEndpointAuthenticationX509Model{ + SecretRef: auth.X509CertificateSettings.SecretRef, + } + } + + return result +} + +func flattenTlsProperties(tls dataflowendpoint.TlsProperties) *TlsPropertiesModel { + result := &TlsPropertiesModel{} + + if tls.Mode != nil { + mode := string(*tls.Mode) + result.Mode = &mode + } + + if tls.TrustedCaCertificateConfigMapRef != nil { + result.TrustedCaCertificateConfigMapRef = tls.TrustedCaCertificateConfigMapRef + } + + return result +} + +// Additional flatten functions for other endpoint types would follow... 
+func flattenDataflowEndpointDataExplorer(dataExplorer dataflowendpoint.DataflowEndpointDataExplorer) *DataflowEndpointDataExplorerModel { + result := &DataflowEndpointDataExplorerModel{ + Database: dataExplorer.Database, + Host: dataExplorer.Host, + Authentication: flattenDataflowEndpointDataExplorerAuthentication(dataExplorer.Authentication), + } + + if dataExplorer.Batching != nil { + result.Batching = &BatchingConfigurationModel{ + LatencySeconds: dataExplorer.Batching.LatencySeconds, + MaxMessages: dataExplorer.Batching.MaxMessages, + } + } + + return result +} + +func flattenDataflowEndpointDataExplorerAuthentication(auth dataflowendpoint.DataflowEndpointDataExplorerAuthentication) DataflowEndpointDataExplorerAuthenticationModel { + result := DataflowEndpointDataExplorerAuthenticationModel{ + Method: string(auth.Method), + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + return result +} + +func flattenDataflowEndpointDataLakeStorage(dataLakeStorage dataflowendpoint.DataflowEndpointDataLakeStorage) *DataflowEndpointDataLakeStorageModel { + result := &DataflowEndpointDataLakeStorageModel{ + Host: dataLakeStorage.Host, + Authentication: flattenDataflowEndpointDataLakeStorageAuthentication(dataLakeStorage.Authentication), + } + + if dataLakeStorage.Batching != nil { + result.Batching = &BatchingConfigurationModel{ + LatencySeconds: dataLakeStorage.Batching.LatencySeconds, + MaxMessages: 
dataLakeStorage.Batching.MaxMessages, + } + } + + return result +} + +func flattenDataflowEndpointDataLakeStorageAuthentication(auth dataflowendpoint.DataflowEndpointDataLakeStorageAuthentication) DataflowEndpointDataLakeStorageAuthenticationModel { + result := DataflowEndpointDataLakeStorageAuthenticationModel{ + Method: string(auth.Method), + } + + if auth.AccessTokenSettings != nil { + result.AccessTokenSettings = &DataflowEndpointAuthenticationAccessTokenModel{ + SecretRef: auth.AccessTokenSettings.SecretRef, + } + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + return result +} + +func flattenDataflowEndpointFabricOneLake(fabricOneLake dataflowendpoint.DataflowEndpointFabricOneLake) *DataflowEndpointFabricOneLakeModel { + result := &DataflowEndpointFabricOneLakeModel{ + Host: fabricOneLake.Host, + OneLakePathType: string(fabricOneLake.OneLakePathType), + Authentication: flattenDataflowEndpointFabricOneLakeAuthentication(fabricOneLake.Authentication), + Names: DataflowEndpointFabricOneLakeNamesModel{ + LakehouseName: fabricOneLake.Names.LakehouseName, + WorkspaceName: fabricOneLake.Names.WorkspaceName, + }, + } + + if fabricOneLake.Batching != nil { + result.Batching = &BatchingConfigurationModel{ + LatencySeconds: fabricOneLake.Batching.LatencySeconds, + MaxMessages: fabricOneLake.Batching.MaxMessages, + } + } + + return result +} + +func flattenDataflowEndpointFabricOneLakeAuthentication(auth 
dataflowendpoint.DataflowEndpointFabricOneLakeAuthentication) DataflowEndpointFabricOneLakeAuthenticationModel { + result := DataflowEndpointFabricOneLakeAuthenticationModel{ + Method: string(auth.Method), + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + return result +} + +func flattenDataflowEndpointLocalStorage(localStorage dataflowendpoint.DataflowEndpointLocalStorage) *DataflowEndpointLocalStorageModel { + return &DataflowEndpointLocalStorageModel{ + PersistentVolumeClaimRef: localStorage.PersistentVolumeClaimRef, + } +} + +func flattenDataflowEndpointMqtt(mqtt dataflowendpoint.DataflowEndpointMqtt) *DataflowEndpointMqttModel { + result := &DataflowEndpointMqttModel{ + Authentication: flattenDataflowEndpointMqttAuthentication(mqtt.Authentication), + } + + if mqtt.ClientIdPrefix != nil { + result.ClientIdPrefix = mqtt.ClientIdPrefix + } + + if mqtt.CloudEventAttributes != nil { + cloudEvent := string(*mqtt.CloudEventAttributes) + result.CloudEventAttributes = &cloudEvent + } + + if mqtt.Host != nil { + result.Host = mqtt.Host + } + + if mqtt.KeepAliveSeconds != nil { + result.KeepAliveSeconds = mqtt.KeepAliveSeconds + } + + if mqtt.MaxInflightMessages != nil { + result.MaxInflightMessages = mqtt.MaxInflightMessages + } + + if mqtt.Protocol != nil { + protocol := string(*mqtt.Protocol) + result.Protocol = &protocol + } + + if mqtt.Qos != nil { + result.Qos = mqtt.Qos + } + + if mqtt.Retain != nil { + retain := 
string(*mqtt.Retain) + result.Retain = &retain + } + + if mqtt.SessionExpirySeconds != nil { + result.SessionExpirySeconds = mqtt.SessionExpirySeconds + } + + if mqtt.Tls != nil { + result.Tls = flattenTlsProperties(*mqtt.Tls) + } + + return result +} + +func flattenDataflowEndpointMqttAuthentication(auth dataflowendpoint.DataflowEndpointMqttAuthentication) DataflowEndpointMqttAuthenticationModel { + result := DataflowEndpointMqttAuthenticationModel{ + Method: string(auth.Method), + } + + if auth.ServiceAccountTokenSettings != nil { + result.ServiceAccountTokenSettings = &DataflowEndpointAuthenticationServiceAccountTokenModel{ + Audience: auth.ServiceAccountTokenSettings.Audience, + } + } + + if auth.SystemAssignedManagedIdentitySettings != nil { + result.SystemAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationSystemAssignedManagedIdentity{ + Audience: auth.SystemAssignedManagedIdentitySettings.Audience, + } + } + + if auth.UserAssignedManagedIdentitySettings != nil { + result.UserAssignedManagedIdentitySettings = &DataflowEndpointAuthenticationUserAssignedManagedIdentity{ + ClientId: auth.UserAssignedManagedIdentitySettings.ClientId, + TenantId: auth.UserAssignedManagedIdentitySettings.TenantId, + Scope: auth.UserAssignedManagedIdentitySettings.Scope, + } + } + + if auth.X509CertificateSettings != nil { + result.X509CertificateSettings = &DataflowEndpointAuthenticationX509Model{ + SecretRef: auth.X509CertificateSettings.SecretRef, + } + } + + return result +} diff --git a/internal/services/iotoperations/iotoperations_dataflowendpoint_resource_test.go b/internal/services/iotoperations/iotoperations_dataflowendpoint_resource_test.go new file mode 100644 index 000000000000..b0fd49c44277 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_dataflowendpoint_resource_test.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +// IotOperationsDataflowEndpointResource is a test harness for azurerm_iotoperations_dataflow_endpoint acceptance tests. +type IotOperationsDataflowEndpointResource struct{} + +func TestAccIotOperationsDataflowEndpoint_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_endpoint", "test") + r := IotOperationsDataflowEndpointResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-de-%s", data.RandomString)), + check.That(data.ResourceName).Key("properties.0.endpoint_type").HasValue("DataExplorer"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsDataflowEndpoint_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_endpoint", "test") + r := IotOperationsDataflowEndpointResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccIotOperationsDataflowEndpoint_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_endpoint", "test") + r := 
IotOperationsDataflowEndpointResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-de-%s", data.RandomString)), + check.That(data.ResourceName).Key("properties.0.endpoint_type").HasValue("DataLakeStorage"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsDataflowEndpoint_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_endpoint", "test") + r := IotOperationsDataflowEndpointResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("properties.0.endpoint_type").HasValue("DataExplorer"), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("properties.0.endpoint_type").HasValue("DataLakeStorage"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func (r IotOperationsDataflowEndpointResource) Exists(ctx context.Context, c *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := dataflowendpoint.ParseDataflowEndpointID(state.ID) + if err != nil { + return nil, fmt.Errorf("parsing ID %q: %w", state.ID, err) + } + + resp, err := c.IoTOperations.DataflowEndpointClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %+v", id.ID(), err) + } + + return utils.Bool(resp.Model != nil), nil +} + +func (r IotOperationsDataflowEndpointResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource 
"azurerm_resource_group" "test" { + name = "acctestRG-iot-%d" + location = "%s" +} + +resource "azurerm_iotoperations_instance" "test" { + name = "acctest-instance-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + extended_location { + name = "acctest-custom-location-%s" + type = "CustomLocation" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomString) +} + +func (r IotOperationsDataflowEndpointResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow_endpoint" "test" { + name = "acctest-de-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + location = azurerm_resource_group.test.location + + properties { + endpoint_type = "DataExplorer" + data_explorer_settings { + authentication { + method = "SystemAssignedManagedIdentity" + system_assigned_managed_identity_settings { + audience = "https://help.kusto.windows.net" + } + } + database = "testdb-%s" + host = "testcluster-%s.region.kusto.windows.net" + batching { + latency_seconds = 5 + max_messages = 100 + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, r.template(data), data.RandomString, data.RandomString, data.RandomString) +} + +func (r IotOperationsDataflowEndpointResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow_endpoint" "import" { + name = azurerm_iotoperations_dataflow_endpoint.test.name + resource_group_name = azurerm_iotoperations_dataflow_endpoint.test.resource_group_name + instance_name = azurerm_iotoperations_dataflow_endpoint.test.instance_name + location = azurerm_iotoperations_dataflow_endpoint.test.location + + properties { + endpoint_type = 
azurerm_iotoperations_dataflow_endpoint.test.properties[0].endpoint_type + data_explorer_settings { + authentication { + method = azurerm_iotoperations_dataflow_endpoint.test.properties[0].data_explorer_settings[0].authentication[0].method + system_assigned_managed_identity_settings { + audience = azurerm_iotoperations_dataflow_endpoint.test.properties[0].data_explorer_settings[0].authentication[0].system_assigned_managed_identity_settings[0].audience + } + } + database = azurerm_iotoperations_dataflow_endpoint.test.properties[0].data_explorer_settings[0].database + host = azurerm_iotoperations_dataflow_endpoint.test.properties[0].data_explorer_settings[0].host + batching { + latency_seconds = azurerm_iotoperations_dataflow_endpoint.test.properties[0].data_explorer_settings[0].batching[0].latency_seconds + max_messages = azurerm_iotoperations_dataflow_endpoint.test.properties[0].data_explorer_settings[0].batching[0].max_messages + } + } + } + + extended_location { + name = azurerm_iotoperations_dataflow_endpoint.test.extended_location[0].name + type = azurerm_iotoperations_dataflow_endpoint.test.extended_location[0].type + } +} +`, r.basic(data)) +} + +func (r IotOperationsDataflowEndpointResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow_endpoint" "test" { + name = "acctest-de-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + location = azurerm_resource_group.test.location + + properties { + endpoint_type = "DataLakeStorage" + data_lake_storage_settings { + authentication { + method = "SystemAssignedManagedIdentity" + system_assigned_managed_identity_settings { + audience = "https://storage.azure.com/" + } + } + host = "testaccount-%s.dfs.core.windows.net" + batching { + latency_seconds = 10 + max_messages = 1000 + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + 
type = azurerm_iotoperations_instance.test.extended_location[0].type + } + + tags = { + environment = "testing" + purpose = "dataflow-endpoint-acceptance-test" + } +} +`, r.template(data), data.RandomString, data.RandomString) +} diff --git a/internal/services/iotoperations/iotoperations_dataflowprofile_resource.go b/internal/services/iotoperations/iotoperations_dataflowprofile_resource.go new file mode 100644 index 000000000000..698a2bf05f81 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_dataflowprofile_resource.go @@ -0,0 +1,354 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type DataflowProfileResource struct{} + +var _ sdk.ResourceWithUpdate = DataflowProfileResource{} + +type DataflowProfileModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + InstanceName string `tfschema:"instance_name"` + InstanceCount *int64 `tfschema:"instance_count"` + Diagnostics *DataflowProfileDiagnosticsModel `tfschema:"diagnostics"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + ProvisioningState *string `tfschema:"provisioning_state"` +} + +type DataflowProfileDiagnosticsModel struct { + Logs *DataflowProfileDiagnosticsLogsModel `tfschema:"logs"` + Metrics *DataflowProfileDiagnosticsMetricsModel `tfschema:"metrics"` +} + +type DataflowProfileDiagnosticsLogsModel struct { + Level *string `tfschema:"level"` +} + +type DataflowProfileDiagnosticsMetricsModel struct { + PrometheusPort *int64 `tfschema:"prometheus_port"` +} + +func (r DataflowProfileResource) ModelObject() interface{} { + 
return &DataflowProfileModel{} +} + +func (r DataflowProfileResource) ResourceType() string { + return "azurerm_iotoperations_dataflow_profile" +} + +func (r DataflowProfileResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return dataflowprofile.ValidateDataflowProfileID +} + +func (r DataflowProfileResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "instance_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "extended_location": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "type": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "CustomLocation", + }, false), + }, + }, + }, + }, + "instance_count": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 1000), + }, + "diagnostics": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "logs": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "level": { + 
Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "trace", + "debug", + "info", + "warn", + "error", + }, false), + }, + }, + }, + }, + "metrics": { + Type: pluginsdk.TypeList, + Optional: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "prometheus_port": { + Type: pluginsdk.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 65535), + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r DataflowProfileResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func (r DataflowProfileResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowProfileClient + + var model DataflowProfileModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := dataflowprofile.NewDataflowProfileID(subscriptionId, model.ResourceGroupName, model.InstanceName, model.Name) + + // Build payload + payload := dataflowprofile.DataflowProfileResource{ + ExtendedLocation: dataflowprofile.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: dataflowprofile.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandDataflowProfileProperties(model), + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r DataflowProfileResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowProfileClient + + 
id, err := dataflowprofile.ParseDataflowProfileID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := DataflowProfileModel{ + Name: id.DataflowProfileName, + ResourceGroupName: id.ResourceGroupName, + InstanceName: id.InstanceName, + } + + if respModel := resp.Model; respModel != nil { + model.ExtendedLocationName = &respModel.ExtendedLocation.Name + extendedLocationType := string(respModel.ExtendedLocation.Type) + model.ExtendedLocationType = &extendedLocationType + + if respModel.Properties != nil { + flattenDataflowProfileProperties(respModel.Properties, &model) + + if respModel.Properties.ProvisioningState != nil { + provisioningState := string(*respModel.Properties.ProvisioningState) + model.ProvisioningState = &provisioningState + } + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r DataflowProfileResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowProfileClient + + id, err := dataflowprofile.ParseDataflowProfileID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var model DataflowProfileModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // For dataflow profile, we use CreateOrUpdate for updates since there's no dedicated Update method + payload := dataflowprofile.DataflowProfileResource{ + ExtendedLocation: dataflowprofile.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: dataflowprofile.ExtendedLocationType(*model.ExtendedLocationType), + }, + Properties: expandDataflowProfileProperties(model), + } + + if err := client.CreateOrUpdateThenPoll(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r 
DataflowProfileResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.DataflowProfileClient + + id, err := dataflowprofile.ParseDataflowProfileID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +func expandDataflowProfileProperties(model DataflowProfileModel) *dataflowprofile.DataflowProfileProperties { + props := &dataflowprofile.DataflowProfileProperties{} + + if model.InstanceCount != nil { + props.InstanceCount = model.InstanceCount + } + + if model.Diagnostics != nil { + props.Diagnostics = expandDataflowProfileDiagnostics(*model.Diagnostics) + } + + return props +} + +func expandDataflowProfileDiagnostics(model DataflowProfileDiagnosticsModel) *dataflowprofile.ProfileDiagnostics { + result := &dataflowprofile.ProfileDiagnostics{} + + if model.Logs != nil { + result.Logs = &dataflowprofile.DiagnosticsLogs{ + Level: model.Logs.Level, + } + } + + if model.Metrics != nil { + result.Metrics = &dataflowprofile.Metrics{ + PrometheusPort: model.Metrics.PrometheusPort, + } + } + + return result +} + +func flattenDataflowProfileProperties(props *dataflowprofile.DataflowProfileProperties, model *DataflowProfileModel) { + if props == nil { + return + } + + if props.InstanceCount != nil { + model.InstanceCount = props.InstanceCount + } + + if props.Diagnostics != nil { + model.Diagnostics = flattenDataflowProfileDiagnostics(*props.Diagnostics) + } +} + +func flattenDataflowProfileDiagnostics(diagnostics dataflowprofile.ProfileDiagnostics) *DataflowProfileDiagnosticsModel { + result := &DataflowProfileDiagnosticsModel{} + + if diagnostics.Logs != nil { + result.Logs = &DataflowProfileDiagnosticsLogsModel{ + Level: diagnostics.Logs.Level, + } + } + + if 
diagnostics.Metrics != nil { + result.Metrics = &DataflowProfileDiagnosticsMetricsModel{ + PrometheusPort: diagnostics.Metrics.PrometheusPort, + } + } + + return result +} diff --git a/internal/services/iotoperations/iotoperations_dataflowprofile_resource_test.go b/internal/services/iotoperations/iotoperations_dataflowprofile_resource_test.go new file mode 100644 index 000000000000..c1dccdd887eb --- /dev/null +++ b/internal/services/iotoperations/iotoperations_dataflowprofile_resource_test.go @@ -0,0 +1,225 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +// IotOperationsDataflowProfileResource is a test harness for azurerm_iotoperations_dataflow_profile acceptance tests. 
+type IotOperationsDataflowProfileResource struct{} + +func TestAccIotOperationsDataflowProfile_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_profile", "test") + r := IotOperationsDataflowProfileResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-dfp-%s", data.RandomString)), + check.That(data.ResourceName).Key("properties.0.instance_count").HasValue("1"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsDataflowProfile_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_profile", "test") + r := IotOperationsDataflowProfileResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccIotOperationsDataflowProfile_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_profile", "test") + r := IotOperationsDataflowProfileResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-dfp-%s", data.RandomString)), + check.That(data.ResourceName).Key("properties.0.instance_count").HasValue("3"), + check.That(data.ResourceName).Key("properties.0.diagnostics.0.logs.0.level").HasValue("Info"), + check.That(data.ResourceName).Key("properties.0.diagnostics.0.metrics.0.prometheus_port").HasValue("9090"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func 
TestAccIotOperationsDataflowProfile_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_dataflow_profile", "test") + r := IotOperationsDataflowProfileResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("properties.0.instance_count").HasValue("1"), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("properties.0.instance_count").HasValue("3"), + check.That(data.ResourceName).Key("properties.0.diagnostics.0.logs.0.level").HasValue("Info"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("properties.0.instance_count").HasValue("1"), + ), + }, + data.ImportStep(), + }) +} + +// Exists implements the acceptance existence check using the generated SDK ID parser. 
+func (IotOperationsDataflowProfileResource) Exists(ctx context.Context, c *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := dataflowprofile.ParseDataflowProfileID(state.ID) + if err != nil { + return nil, fmt.Errorf("parsing ID %q: %w", state.ID, err) + } + + resp, err := c.IoTOperations.DataflowProfileClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("retrieving %s: %+v", id.ID(), err) + } + + return utils.Bool(resp.Model != nil), nil +} + +func (r IotOperationsDataflowProfileResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-iotops-%d" + location = "%s" +} + +resource "azurerm_iotoperations_instance" "test" { + name = "acctest-instance-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + + extended_location { + name = "acctest-custom-location-%s" + type = "CustomLocation" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomString) +} + +func (r IotOperationsDataflowProfileResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow_profile" "test" { + name = "acctest-dfp-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + location = azurerm_resource_group.test.location + + properties { + instance_count = 1 + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, r.template(data), data.RandomString) +} + +func (r IotOperationsDataflowProfileResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow_profile" "import" { + name = azurerm_iotoperations_dataflow_profile.test.name + 
resource_group_name = azurerm_iotoperations_dataflow_profile.test.resource_group_name + instance_name = azurerm_iotoperations_dataflow_profile.test.instance_name + location = azurerm_iotoperations_dataflow_profile.test.location + + properties { + instance_count = azurerm_iotoperations_dataflow_profile.test.properties[0].instance_count + } + + extended_location { + name = azurerm_iotoperations_dataflow_profile.test.extended_location[0].name + type = azurerm_iotoperations_dataflow_profile.test.extended_location[0].type + } +} +`, r.basic(data)) +} + +func (r IotOperationsDataflowProfileResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_dataflow_profile" "test" { + name = "acctest-dfp-%s" + resource_group_name = azurerm_resource_group.test.name + instance_name = azurerm_iotoperations_instance.test.name + location = azurerm_resource_group.test.location + + properties { + instance_count = 3 + + diagnostics { + logs { + level = "Info" + } + metrics { + prometheus_port = 9090 + } + } + } + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } + + tags = { + environment = "testing" + purpose = "dataflow-profile-acceptance-test" + } +} +`, r.template(data), data.RandomString) +} diff --git a/internal/services/iotoperations/iotoperations_instance_resource.go b/internal/services/iotoperations/iotoperations_instance_resource.go new file mode 100644 index 000000000000..20623d0eb55d --- /dev/null +++ b/internal/services/iotoperations/iotoperations_instance_resource.go @@ -0,0 +1,282 @@ +package iotoperations + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + 
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type InstanceResource struct{} + +var _ sdk.ResourceWithUpdate = InstanceResource{} + +type InstanceModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + Location string `tfschema:"location"` + Description *string `tfschema:"description"` + Version *string `tfschema:"version"` + ProvisioningState *string `tfschema:"provisioning_state"` + ExtendedLocationName *string `tfschema:"extended_location_name"` + ExtendedLocationType *string `tfschema:"extended_location_type"` + SchemaRegistryRef string `tfschema:"schema_registry_ref"` + Tags map[string]string `tfschema:"tags"` +} + +func (r InstanceResource) ModelObject() interface{} { + return &InstanceModel{} +} + +func (r InstanceResource) ResourceType() string { + return "azurerm_iotoperations_instance" +} + +func (r InstanceResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return instance.ValidateInstanceID +} + +func (r InstanceResource) Arguments() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 63), + validation.StringMatch(regexp.MustCompile("^[a-z0-9][a-z0-9-]*[a-z0-9]$"), "Must match ^[a-z0-9][a-z0-9-]*[a-z0-9]$"), + ), + }, + "resource_group_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 90), + }, + "location": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + "schema_registry_ref": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + "description": { + Type: pluginsdk.TypeString, + Optional: true, + }, + "version": { + Type: pluginsdk.TypeString, + Optional: true, + // NOTE: O+C Azure assigns a default version if not specified, and may update it during resource lifecycle + Computed: 
true, + }, + "extended_location_name": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + }, + "extended_location_type": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + }, + "tags": { + Type: pluginsdk.TypeMap, + Optional: true, + Elem: &pluginsdk.Schema{Type: pluginsdk.TypeString}, + }, + } +} + +func (r InstanceResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{ + "provisioning_state": { + Type: pluginsdk.TypeString, + Computed: true, + }, + } +} + +func (r InstanceResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.InstanceClient + + var model InstanceModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + subscriptionId := metadata.Client.Account.SubscriptionId + id := instance.NewInstanceID(subscriptionId, model.ResourceGroupName, model.Name) + + // Build extended location if provided + var extendedLocation *instance.ExtendedLocation + if model.ExtendedLocationName != nil && model.ExtendedLocationType != nil { + extendedLocation = &instance.ExtendedLocation{ + Name: *model.ExtendedLocationName, + Type: instance.ExtendedLocationType(*model.ExtendedLocationType), + } + } + + // Build properties + props := &instance.InstanceProperties{ + Description: model.Description, + Version: model.Version, + SchemaRegistryRef: instance.SchemaRegistryRef{ + ResourceId: model.SchemaRegistryRef, + }, + } + + payload := instance.InstanceResource{ + Location: model.Location, + ExtendedLocation: *extendedLocation, // Required field + Properties: props, + Tags: &model.Tags, + } + + if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (r InstanceResource) Read() 
sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.InstanceClient + + id, err := instance.ParseInstanceID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + return fmt.Errorf("reading %s: %+v", *id, err) + } + + model := InstanceModel{ + Name: id.InstanceName, + ResourceGroupName: id.ResourceGroupName, + } + + if resp.Model != nil { + model.Location = resp.Model.Location + + if resp.Model.ExtendedLocation.Name != "" { + model.ExtendedLocationName = &resp.Model.ExtendedLocation.Name + model.ExtendedLocationType = (*string)(&resp.Model.ExtendedLocation.Type) + } + + if resp.Model.Properties != nil { + model.Description = resp.Model.Properties.Description + model.Version = resp.Model.Properties.Version + model.SchemaRegistryRef = resp.Model.Properties.SchemaRegistryRef.ResourceId + + if resp.Model.Properties.ProvisioningState != nil { + provState := string(*resp.Model.Properties.ProvisioningState) + model.ProvisioningState = &provState + } + } + + if resp.Model.Tags != nil { + model.Tags = *resp.Model.Tags + } + } + + return metadata.Encode(&model) + }, + } +} + +func (r InstanceResource) Update() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.InstanceClient + + id, err := instance.ParseInstanceID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + var model InstanceModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + // Check if anything actually changed + if !metadata.ResourceData.HasChange("tags") { + return nil + } + + payload := instance.InstancePatchModel{} + hasChanges := false + + // Only tags can be updated via PATCH + if 
metadata.ResourceData.HasChange("tags") { + payload.Tags = &model.Tags + hasChanges = true + } + + if !hasChanges { + return nil + } + + if _, err := client.Update(ctx, *id, payload); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return nil + }, + } +} + +func (r InstanceResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.IoTOperations.InstanceClient + + id, err := instance.ParseInstanceID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if err := client.DeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + } + + return nil + }, + } +} + +// Helper to get pointer to any type +func toPtr[T any](v T) *T { + return &v +} + +// Using your existing toPtr helper (create reverse function) +func fromPtr[T any](ptr *T) T { + if ptr == nil { + var zero T + return zero + } + return *ptr +} diff --git a/internal/services/iotoperations/iotoperations_instance_resource_test.go b/internal/services/iotoperations/iotoperations_instance_resource_test.go new file mode 100644 index 000000000000..dc588733d0a6 --- /dev/null +++ b/internal/services/iotoperations/iotoperations_instance_resource_test.go @@ -0,0 +1,183 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iotoperations_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +// IotOperationsInstanceResource is a test harness for azurerm_iotoperations_instance acceptance tests. +type IotOperationsInstanceResource struct{} + +func TestAccIotOperationsInstance_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_instance", "test") + r := IotOperationsInstanceResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-ioi-%s", data.RandomString)), + check.That(data.ResourceName).Key("schema_registry_ref").Exists(), + check.That(data.ResourceName).Key("extended_location.0.type").HasValue("CustomLocation"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsInstance_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_instance", "test") + r := IotOperationsInstanceResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccIotOperationsInstance_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_instance", "test") + r := IotOperationsInstanceResource{} + + data.ResourceTest(t, r, 
[]acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("name").HasValue(fmt.Sprintf("acctest-ioi-%s", data.RandomString)), + check.That(data.ResourceName).Key("schema_registry_ref").Exists(), + check.That(data.ResourceName).Key("description").HasValue("This is a test IoT Operations instance for terraform acceptance test"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccIotOperationsInstance_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_iotoperations_instance", "test") + r := IotOperationsInstanceResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("description").HasValue("This is a test IoT Operations instance for terraform acceptance test"), + check.That(data.ResourceName).Key("tags.%").HasValue("2"), + ), + }, + data.ImportStep(), + }) +} + +func (r IotOperationsInstanceResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := instance.ParseInstanceID(state.ID) + if err != nil { + return nil, err + } + + resp, err := clients.IoTOperations.InstanceClient.Get(ctx, *id) + if err != nil { + return nil, fmt.Errorf("reading %s: %+v", *id, err) + } + + return utils.Bool(resp.Model != nil), nil +} + +func (r IotOperationsInstanceResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-iotops-%d" + location = "%s" +} +`, data.RandomInteger, 
data.Locations.Primary) +} + +func (r IotOperationsInstanceResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_instance" "test" { + name = "acctest-ioi-%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + schema_registry_ref = "/subscriptions/%s/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.DeviceRegistry/schemaRegistries/acctest-registry-%s" + + extended_location { + name = "acctest-custom-location-%s" + type = "CustomLocation" + } +} +`, r.template(data), data.RandomString, data.Client().SubscriptionID, data.RandomString, data.RandomString) +} + +func (r IotOperationsInstanceResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_instance" "import" { + name = azurerm_iotoperations_instance.test.name + resource_group_name = azurerm_iotoperations_instance.test.resource_group_name + location = azurerm_iotoperations_instance.test.location + schema_registry_ref = azurerm_iotoperations_instance.test.schema_registry_ref + + extended_location { + name = azurerm_iotoperations_instance.test.extended_location[0].name + type = azurerm_iotoperations_instance.test.extended_location[0].type + } +} +`, r.basic(data)) +} + +func (r IotOperationsInstanceResource) complete(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_iotoperations_instance" "test" { + name = "acctest-ioi-%s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + schema_registry_ref = "/subscriptions/%s/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.DeviceRegistry/schemaRegistries/acctest-registry-%s" + description = "This is a test IoT Operations instance for terraform acceptance test" + + extended_location { + name = "acctest-custom-location-%s" + type = "CustomLocation" + } + + tags = { + 
environment = "testing"
+    cost_center = "finance"
+  }
+}
+`, r.template(data), data.RandomString, data.Client().SubscriptionID, data.RandomString, data.RandomString)
+}
diff --git a/internal/services/iotoperations/registration.go b/internal/services/iotoperations/registration.go
new file mode 100644
index 000000000000..54f2c9ebb2fc
--- /dev/null
+++ b/internal/services/iotoperations/registration.go
@@ -0,0 +1,50 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package iotoperations
+
+import (
+	"github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
+)
+
+// Registration registers the IoT Operations service with the provider
+// using the typed (sdk.Resource / sdk.DataSource) registration model.
+type Registration struct{}
+
+var _ sdk.TypedServiceRegistrationWithAGitHubLabel = Registration{}
+
+// AssociatedGitHubLabel returns the GitHub label applied to issues and PRs for this service.
+func (r Registration) AssociatedGitHubLabel() string {
+	return "service/iot-operations"
+}
+
+// Name is the name of this Service
+func (r Registration) Name() string {
+	return "IoT Operations"
+}
+
+// WebsiteCategories returns a list of categories which can be used for the sidebar
+func (r Registration) WebsiteCategories() []string {
+	return []string{
+		"IoT Operations",
+	}
+}
+
+// DataSources returns the typed data sources supported by this service.
+func (r Registration) DataSources() []sdk.DataSource {
+	return []sdk.DataSource{}
+}
+
+// Resources returns the typed resources supported by this service.
+func (r Registration) Resources() []sdk.Resource {
+	return []sdk.Resource{
+		InstanceResource{},
+		BrokerResource{},
+		BrokerAuthenticationResource{},
+		BrokerAuthorizationResource{},
+		BrokerListenerResource{},
+		DataflowResource{},
+		DataflowEndpointResource{},
+		DataflowProfileResource{},
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/README.md
new file mode 100644
index 000000000000..4d6dbe386b38
--- /dev/null
+++ 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker` Documentation + +The `broker` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker" +``` + + +### Client Initialization + +```go +client := broker.NewBrokerClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BrokerClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := broker.NewBrokerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName") + +payload := broker.BrokerResource{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerClient.Delete` + +```go +ctx := context.TODO() +id := broker.NewBrokerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerClient.Get` + +```go +ctx := context.TODO() +id := broker.NewBrokerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BrokerClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := broker.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/client.go new file mode 100644 index 000000000000..56e896cd4324 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/client.go @@ -0,0 +1,26 @@ +package broker + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerClient struct { + Client *resourcemanager.Client +} + +func NewBrokerClientWithBaseURI(sdkApi sdkEnv.Api) (*BrokerClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "broker", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating BrokerClient: %+v", err) + } + + return &BrokerClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/constants.go new file mode 100644 index 000000000000..ae64a9f4eeb8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/constants.go @@ -0,0 +1,377 @@ +package broker + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerMemoryProfile string + +const ( + BrokerMemoryProfileHigh BrokerMemoryProfile = "High" + BrokerMemoryProfileLow BrokerMemoryProfile = "Low" + BrokerMemoryProfileMedium BrokerMemoryProfile = "Medium" + BrokerMemoryProfileTiny BrokerMemoryProfile = "Tiny" +) + +func PossibleValuesForBrokerMemoryProfile() []string { + return []string{ + string(BrokerMemoryProfileHigh), + string(BrokerMemoryProfileLow), + string(BrokerMemoryProfileMedium), + string(BrokerMemoryProfileTiny), + } +} + +func (s *BrokerMemoryProfile) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBrokerMemoryProfile(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBrokerMemoryProfile(input string) (*BrokerMemoryProfile, error) { + vals := map[string]BrokerMemoryProfile{ + "high": BrokerMemoryProfileHigh, + "low": BrokerMemoryProfileLow, + "medium": BrokerMemoryProfileMedium, + "tiny": BrokerMemoryProfileTiny, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BrokerMemoryProfile(input) + return &out, nil +} + +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + 
vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type OperationalMode string + +const ( + OperationalModeDisabled OperationalMode = "Disabled" + OperationalModeEnabled OperationalMode = "Enabled" +) + +func PossibleValuesForOperationalMode() []string { + return []string{ + string(OperationalModeDisabled), + string(OperationalModeEnabled), + } +} + +func (s *OperationalMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOperationalMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOperationalMode(input string) (*OperationalMode, error) { + vals := map[string]OperationalMode{ + "disabled": OperationalModeDisabled, + "enabled": OperationalModeEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OperationalMode(input) + return &out, nil +} + +type OperatorValues string + +const ( + OperatorValuesDoesNotExist OperatorValues = "DoesNotExist" + OperatorValuesExists OperatorValues = "Exists" + OperatorValuesIn OperatorValues = "In" + OperatorValuesNotIn OperatorValues = "NotIn" +) + +func PossibleValuesForOperatorValues() []string { + return []string{ + string(OperatorValuesDoesNotExist), + string(OperatorValuesExists), + string(OperatorValuesIn), + string(OperatorValuesNotIn), + } +} + +func (s *OperatorValues) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseOperatorValues(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOperatorValues(input string) (*OperatorValues, error) { + vals := map[string]OperatorValues{ + "doesnotexist": OperatorValuesDoesNotExist, + "exists": OperatorValuesExists, + "in": OperatorValuesIn, + "notin": OperatorValuesNotIn, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OperatorValues(input) + return &out, nil +} + +type PrivateKeyAlgorithm string + +const ( + PrivateKeyAlgorithmEcFiveTwoOne PrivateKeyAlgorithm = "Ec521" + PrivateKeyAlgorithmEcThreeEightFour PrivateKeyAlgorithm = "Ec384" + PrivateKeyAlgorithmEcTwoFiveSix PrivateKeyAlgorithm = "Ec256" + PrivateKeyAlgorithmEdTwoFiveFiveOneNine PrivateKeyAlgorithm = "Ed25519" + PrivateKeyAlgorithmRsaEightOneNineTwo PrivateKeyAlgorithm = "Rsa8192" + PrivateKeyAlgorithmRsaFourZeroNineSix PrivateKeyAlgorithm = "Rsa4096" + PrivateKeyAlgorithmRsaTwoZeroFourEight PrivateKeyAlgorithm = "Rsa2048" +) + +func PossibleValuesForPrivateKeyAlgorithm() []string { + return []string{ + string(PrivateKeyAlgorithmEcFiveTwoOne), + string(PrivateKeyAlgorithmEcThreeEightFour), + string(PrivateKeyAlgorithmEcTwoFiveSix), + string(PrivateKeyAlgorithmEdTwoFiveFiveOneNine), + string(PrivateKeyAlgorithmRsaEightOneNineTwo), + string(PrivateKeyAlgorithmRsaFourZeroNineSix), + string(PrivateKeyAlgorithmRsaTwoZeroFourEight), + } +} + +func (s *PrivateKeyAlgorithm) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePrivateKeyAlgorithm(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePrivateKeyAlgorithm(input string) (*PrivateKeyAlgorithm, error) { + vals := map[string]PrivateKeyAlgorithm{ + 
"ec521": PrivateKeyAlgorithmEcFiveTwoOne, + "ec384": PrivateKeyAlgorithmEcThreeEightFour, + "ec256": PrivateKeyAlgorithmEcTwoFiveSix, + "ed25519": PrivateKeyAlgorithmEdTwoFiveFiveOneNine, + "rsa8192": PrivateKeyAlgorithmRsaEightOneNineTwo, + "rsa4096": PrivateKeyAlgorithmRsaFourZeroNineSix, + "rsa2048": PrivateKeyAlgorithmRsaTwoZeroFourEight, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateKeyAlgorithm(input) + return &out, nil +} + +type PrivateKeyRotationPolicy string + +const ( + PrivateKeyRotationPolicyAlways PrivateKeyRotationPolicy = "Always" + PrivateKeyRotationPolicyNever PrivateKeyRotationPolicy = "Never" +) + +func PossibleValuesForPrivateKeyRotationPolicy() []string { + return []string{ + string(PrivateKeyRotationPolicyAlways), + string(PrivateKeyRotationPolicyNever), + } +} + +func (s *PrivateKeyRotationPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePrivateKeyRotationPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePrivateKeyRotationPolicy(input string) (*PrivateKeyRotationPolicy, error) { + vals := map[string]PrivateKeyRotationPolicy{ + "always": PrivateKeyRotationPolicyAlways, + "never": PrivateKeyRotationPolicyNever, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateKeyRotationPolicy(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + 
ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type SubscriberMessageDropStrategy string + +const ( + SubscriberMessageDropStrategyDropOldest SubscriberMessageDropStrategy = "DropOldest" + SubscriberMessageDropStrategyNone SubscriberMessageDropStrategy = "None" +) + +func PossibleValuesForSubscriberMessageDropStrategy() []string { + return []string{ + string(SubscriberMessageDropStrategyDropOldest), + string(SubscriberMessageDropStrategyNone), + } +} + +func (s *SubscriberMessageDropStrategy) UnmarshalJSON(bytes []byte) error { + var decoded string + if 
err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSubscriberMessageDropStrategy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSubscriberMessageDropStrategy(input string) (*SubscriberMessageDropStrategy, error) { + vals := map[string]SubscriberMessageDropStrategy{ + "dropoldest": SubscriberMessageDropStrategyDropOldest, + "none": SubscriberMessageDropStrategyNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SubscriberMessageDropStrategy(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/id_broker.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/id_broker.go new file mode 100644 index 000000000000..100b0a98a40a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/id_broker.go @@ -0,0 +1,139 @@ +package broker + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&BrokerId{}) +} + +var _ resourceids.ResourceId = &BrokerId{} + +// BrokerId is a struct representing the Resource ID for a Broker +type BrokerId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string +} + +// NewBrokerID returns a new BrokerId struct +func NewBrokerID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string) BrokerId { + return BrokerId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + } +} + +// ParseBrokerID parses 'input' into a BrokerId +func ParseBrokerID(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseBrokerIDInsensitively parses 'input' case-insensitively into a BrokerId +// note: this method should only be used for API response data and not user input +func ParseBrokerIDInsensitively(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BrokerId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok 
= input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + return nil +} + +// ValidateBrokerID checks that 'input' can be parsed as a Broker ID +func ValidateBrokerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBrokerID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Broker ID +func (id BrokerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Broker ID +func (id BrokerId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + } +} + +// String returns a human-readable 
description of this Broker ID +func (id BrokerId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + } + return fmt.Sprintf("Broker (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/id_instance.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/id_instance.go new file mode 100644 index 000000000000..567bf222c997 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/id_instance.go @@ -0,0 +1,130 @@ +package broker + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&InstanceId{}) +} + +var _ resourceids.ResourceId = &InstanceId{} + +// InstanceId is a struct representing the Resource ID for a Instance +type InstanceId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string +} + +// NewInstanceID returns a new InstanceId struct +func NewInstanceID(subscriptionId string, resourceGroupName string, instanceName string) InstanceId { + return InstanceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + } +} + +// ParseInstanceID parses 'input' into a InstanceId +func ParseInstanceID(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseInstanceIDInsensitively parses 'input' case-insensitively into a InstanceId +// note: this method should only be used for API response data and not user input +func ParseInstanceIDInsensitively(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *InstanceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = 
input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + return nil +} + +// ValidateInstanceID checks that 'input' can be parsed as a Instance ID +func ValidateInstanceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseInstanceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Instance ID +func (id InstanceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Instance ID +func (id InstanceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + } +} + +// String returns a human-readable description of this Instance ID +func (id InstanceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + } + return 
fmt.Sprintf("Instance (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_createorupdate.go new file mode 100644 index 000000000000..0325f8f38782 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_createorupdate.go @@ -0,0 +1,75 @@ +package broker + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *BrokerResource +} + +// CreateOrUpdate ... 
+func (c BrokerClient) CreateOrUpdate(ctx context.Context, id BrokerId, input BrokerResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c BrokerClient) CreateOrUpdateThenPoll(ctx context.Context, id BrokerId, input BrokerResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_delete.go new file mode 100644 index 000000000000..25734f3163c8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_delete.go @@ -0,0 +1,70 @@ +package broker + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c BrokerClient) Delete(ctx context.Context, id BrokerId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c BrokerClient) DeleteThenPoll(ctx context.Context, id BrokerId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_get.go new file mode 100644 index 000000000000..5b46b46413c5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_get.go @@ -0,0 +1,53 @@ +package broker + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *BrokerResource +} + +// Get ... +func (c BrokerClient) Get(ctx context.Context, id BrokerId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model BrokerResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_listbyresourcegroup.go new file mode 100644 index 000000000000..dafff2724198 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/method_listbyresourcegroup.go @@ -0,0 +1,105 @@ +package broker + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]BrokerResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []BrokerResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c BrokerClient) ListByResourceGroup(ctx context.Context, id InstanceId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/brokers", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]BrokerResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c BrokerClient) ListByResourceGroupComplete(ctx context.Context, id InstanceId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, BrokerResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c BrokerClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id InstanceId, predicate BrokerResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := 
make([]BrokerResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_advancedsettings.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_advancedsettings.go new file mode 100644 index 000000000000..192c78010d20 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_advancedsettings.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AdvancedSettings struct { + Clients *ClientConfig `json:"clients,omitempty"` + EncryptInternalTraffic *OperationalMode `json:"encryptInternalTraffic,omitempty"` + InternalCerts *CertManagerCertOptions `json:"internalCerts,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_backendchain.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_backendchain.go new file mode 100644 index 000000000000..0888c06d5ca0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_backendchain.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackendChain struct { + Partitions int64 `json:"partitions"` + RedundancyFactor int64 `json:"redundancyFactor"` + Workers *int64 `json:"workers,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerdiagnostics.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerdiagnostics.go new file mode 100644 index 000000000000..4b7c49ddc7da --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerdiagnostics.go @@ -0,0 +1,11 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerDiagnostics struct { + Logs *DiagnosticsLogs `json:"logs,omitempty"` + Metrics *Metrics `json:"metrics,omitempty"` + SelfCheck *SelfCheck `json:"selfCheck,omitempty"` + Traces *Traces `json:"traces,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerproperties.go new file mode 100644 index 000000000000..dad11ce3a9fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerproperties.go @@ -0,0 +1,14 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerProperties struct { + Advanced *AdvancedSettings `json:"advanced,omitempty"` + Cardinality *Cardinality `json:"cardinality,omitempty"` + Diagnostics *BrokerDiagnostics `json:"diagnostics,omitempty"` + DiskBackedMessageBuffer *DiskBackedMessageBuffer `json:"diskBackedMessageBuffer,omitempty"` + GenerateResourceLimits *GenerateResourceLimits `json:"generateResourceLimits,omitempty"` + MemoryProfile *BrokerMemoryProfile `json:"memoryProfile,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerresource.go new file mode 100644 index 000000000000..f5a038bf4bea --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_brokerresource.go @@ -0,0 +1,17 @@ +package broker + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *BrokerProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_cardinality.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_cardinality.go new file mode 100644 index 000000000000..734e71405900 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_cardinality.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Cardinality struct { + BackendChain BackendChain `json:"backendChain"` + Frontend Frontend `json:"frontend"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_certmanagercertoptions.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_certmanagercertoptions.go new file mode 100644 index 000000000000..8843e2be46e1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_certmanagercertoptions.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CertManagerCertOptions struct { + Duration string `json:"duration"` + PrivateKey CertManagerPrivateKey `json:"privateKey"` + RenewBefore string `json:"renewBefore"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_certmanagerprivatekey.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_certmanagerprivatekey.go new file mode 100644 index 000000000000..30450daa0e5b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_certmanagerprivatekey.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CertManagerPrivateKey struct { + Algorithm PrivateKeyAlgorithm `json:"algorithm"` + RotationPolicy PrivateKeyRotationPolicy `json:"rotationPolicy"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_clientconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_clientconfig.go new file mode 100644 index 000000000000..1fcc6790cd90 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_clientconfig.go @@ -0,0 +1,13 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ClientConfig struct { + MaxKeepAliveSeconds *int64 `json:"maxKeepAliveSeconds,omitempty"` + MaxMessageExpirySeconds *int64 `json:"maxMessageExpirySeconds,omitempty"` + MaxPacketSizeBytes *int64 `json:"maxPacketSizeBytes,omitempty"` + MaxReceiveMaximum *int64 `json:"maxReceiveMaximum,omitempty"` + MaxSessionExpirySeconds *int64 `json:"maxSessionExpirySeconds,omitempty"` + SubscriberQueueLimit *SubscriberQueueLimit `json:"subscriberQueueLimit,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_diagnosticslogs.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_diagnosticslogs.go new file mode 100644 index 000000000000..629958963f16 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_diagnosticslogs.go @@ -0,0 +1,8 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DiagnosticsLogs struct { + Level *string `json:"level,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_diskbackedmessagebuffer.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_diskbackedmessagebuffer.go new file mode 100644 index 000000000000..bbe4ef37a285 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_diskbackedmessagebuffer.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DiskBackedMessageBuffer struct { + EphemeralVolumeClaimSpec *VolumeClaimSpec `json:"ephemeralVolumeClaimSpec,omitempty"` + MaxSize string `json:"maxSize"` + PersistentVolumeClaimSpec *VolumeClaimSpec `json:"persistentVolumeClaimSpec,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_extendedlocation.go new file mode 100644 index 000000000000..004e3f4b435f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_extendedlocation.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_frontend.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_frontend.go new file mode 100644 index 000000000000..5046bd0ba8fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_frontend.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Frontend struct { + Replicas int64 `json:"replicas"` + Workers *int64 `json:"workers,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_generateresourcelimits.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_generateresourcelimits.go new file mode 100644 index 000000000000..4736f0007308 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_generateresourcelimits.go @@ -0,0 +1,8 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GenerateResourceLimits struct { + Cpu *OperationalMode `json:"cpu,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_kubernetesreference.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_kubernetesreference.go new file mode 100644 index 000000000000..9858c65279c0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_kubernetesreference.go @@ -0,0 +1,11 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type KubernetesReference struct { + ApiGroup *string `json:"apiGroup,omitempty"` + Kind string `json:"kind"` + Name string `json:"name"` + Namespace *string `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_localkubernetesreference.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_localkubernetesreference.go new file mode 100644 index 000000000000..3ca0a98bad9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_localkubernetesreference.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type LocalKubernetesReference struct { + ApiGroup *string `json:"apiGroup,omitempty"` + Kind string `json:"kind"` + Name string `json:"name"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_metrics.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_metrics.go new file mode 100644 index 000000000000..6da2e5104181 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_metrics.go @@ -0,0 +1,8 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Metrics struct { + PrometheusPort *int64 `json:"prometheusPort,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_selfcheck.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_selfcheck.go new file mode 100644 index 000000000000..1efdaaf9ba8c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_selfcheck.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SelfCheck struct { + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + Mode *OperationalMode `json:"mode,omitempty"` + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_selftracing.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_selftracing.go new file mode 100644 index 000000000000..5be24748c7ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_selftracing.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SelfTracing struct { + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + Mode *OperationalMode `json:"mode,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_subscriberqueuelimit.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_subscriberqueuelimit.go new file mode 100644 index 000000000000..49fbd52bfc4b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_subscriberqueuelimit.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubscriberQueueLimit struct { + Length *int64 `json:"length,omitempty"` + Strategy *SubscriberMessageDropStrategy `json:"strategy,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_traces.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_traces.go new file mode 100644 index 000000000000..5ae6a7a8d176 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_traces.go @@ -0,0 +1,11 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Traces struct { + CacheSizeMegabytes *int64 `json:"cacheSizeMegabytes,omitempty"` + Mode *OperationalMode `json:"mode,omitempty"` + SelfTracing *SelfTracing `json:"selfTracing,omitempty"` + SpanChannelCapacity *int64 `json:"spanChannelCapacity,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimresourcerequirements.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimresourcerequirements.go new file mode 100644 index 000000000000..360ff99c4f2d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimresourcerequirements.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeClaimResourceRequirements struct { + Limits *map[string]string `json:"limits,omitempty"` + Requests *map[string]string `json:"requests,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspec.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspec.go new file mode 100644 index 000000000000..16b3edfe57dd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspec.go @@ -0,0 +1,15 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeClaimSpec struct { + AccessModes *[]string `json:"accessModes,omitempty"` + DataSource *LocalKubernetesReference `json:"dataSource,omitempty"` + DataSourceRef *KubernetesReference `json:"dataSourceRef,omitempty"` + Resources *VolumeClaimResourceRequirements `json:"resources,omitempty"` + Selector *VolumeClaimSpecSelector `json:"selector,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + VolumeMode *string `json:"volumeMode,omitempty"` + VolumeName *string `json:"volumeName,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspecselector.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspecselector.go new file mode 100644 index 000000000000..da66fbacedf1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspecselector.go @@ -0,0 +1,9 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeClaimSpecSelector struct { + MatchExpressions *[]VolumeClaimSpecSelectorMatchExpressions `json:"matchExpressions,omitempty"` + MatchLabels *map[string]string `json:"matchLabels,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspecselectormatchexpressions.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspecselectormatchexpressions.go new file mode 100644 index 000000000000..34fd0249d0dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/model_volumeclaimspecselectormatchexpressions.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeClaimSpecSelectorMatchExpressions struct { + Key string `json:"key"` + Operator OperatorValues `json:"operator"` + Values *[]string `json:"values,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/predicates.go new file mode 100644 index 000000000000..c0c0ef073f21 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/predicates.go @@ -0,0 +1,27 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p BrokerResourceOperationPredicate) Matches(input BrokerResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/version.go new file mode 100644 index 000000000000..93c8505bcf6b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker/version.go @@ -0,0 +1,10 @@ +package broker + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/broker/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/README.md new file mode 100644 index 000000000000..30bebf5c1c58 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication` Documentation + +The `brokerauthentication` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication" +``` + + +### Client Initialization + +```go +client := brokerauthentication.NewBrokerAuthenticationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BrokerAuthenticationClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := brokerauthentication.NewAuthenticationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "authenticationName") + +payload := brokerauthentication.BrokerAuthenticationResource{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerAuthenticationClient.Delete` + +```go +ctx := context.TODO() +id := brokerauthentication.NewAuthenticationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "authenticationName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerAuthenticationClient.Get` + +```go +ctx := context.TODO() +id := brokerauthentication.NewAuthenticationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "authenticationName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BrokerAuthenticationClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := brokerauthentication.NewBrokerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/client.go new file mode 100644 index 000000000000..271f896cbd32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/client.go @@ -0,0 +1,26 @@ +package brokerauthentication + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv 
"github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthenticationClient struct { + Client *resourcemanager.Client +} + +func NewBrokerAuthenticationClientWithBaseURI(sdkApi sdkEnv.Api) (*BrokerAuthenticationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "brokerauthentication", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating BrokerAuthenticationClient: %+v", err) + } + + return &BrokerAuthenticationClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/constants.go new file mode 100644 index 000000000000..0bd93144cf51 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/constants.go @@ -0,0 +1,148 @@ +package brokerauthentication + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerAuthenticationMethod string + +const ( + BrokerAuthenticationMethodCustom BrokerAuthenticationMethod = "Custom" + BrokerAuthenticationMethodServiceAccountToken BrokerAuthenticationMethod = "ServiceAccountToken" + BrokerAuthenticationMethodXFiveZeroNine BrokerAuthenticationMethod = "X509" +) + +func PossibleValuesForBrokerAuthenticationMethod() []string { + return []string{ + string(BrokerAuthenticationMethodCustom), + string(BrokerAuthenticationMethodServiceAccountToken), + string(BrokerAuthenticationMethodXFiveZeroNine), + } +} + +func (s *BrokerAuthenticationMethod) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBrokerAuthenticationMethod(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBrokerAuthenticationMethod(input string) (*BrokerAuthenticationMethod, error) { + vals := map[string]BrokerAuthenticationMethod{ + "custom": BrokerAuthenticationMethodCustom, + "serviceaccounttoken": BrokerAuthenticationMethodServiceAccountToken, + "x509": BrokerAuthenticationMethodXFiveZeroNine, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BrokerAuthenticationMethod(input) + return &out, nil +} + +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", 
decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if 
v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/id_authentication.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/id_authentication.go new file mode 100644 index 000000000000..c2d7a754b191 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/id_authentication.go @@ -0,0 +1,148 @@ +package brokerauthentication + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&AuthenticationId{}) +} + +var _ resourceids.ResourceId = &AuthenticationId{} + +// AuthenticationId is a struct representing the Resource ID for a Authentication +type AuthenticationId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string + AuthenticationName string +} + +// NewAuthenticationID returns a new AuthenticationId struct +func NewAuthenticationID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string, authenticationName string) AuthenticationId { + return AuthenticationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + AuthenticationName: authenticationName, + } +} + +// ParseAuthenticationID parses 'input' into a AuthenticationId +func ParseAuthenticationID(input string) (*AuthenticationId, error) { + parser := resourceids.NewParserFromResourceIdType(&AuthenticationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AuthenticationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseAuthenticationIDInsensitively parses 'input' case-insensitively into a AuthenticationId +// note: this method should only be used for API response data and not user input +func ParseAuthenticationIDInsensitively(input string) (*AuthenticationId, error) { + parser := resourceids.NewParserFromResourceIdType(&AuthenticationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AuthenticationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *AuthenticationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = 
input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + if id.AuthenticationName, ok = input.Parsed["authenticationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "authenticationName", input) + } + + return nil +} + +// ValidateAuthenticationID checks that 'input' can be parsed as a Authentication ID +func ValidateAuthenticationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAuthenticationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Authentication ID +func (id AuthenticationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s/authentications/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName, id.AuthenticationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Authentication ID +func (id AuthenticationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + resourceids.StaticSegment("staticAuthentications", "authentications", "authentications"), + resourceids.UserSpecifiedSegment("authenticationName", "authenticationName"), + } +} + +// String returns a human-readable description of this Authentication ID +func (id AuthenticationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + fmt.Sprintf("Authentication Name: %q", id.AuthenticationName), + } + return fmt.Sprintf("Authentication (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/id_broker.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/id_broker.go new file mode 100644 index 000000000000..a16669e4f951 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/id_broker.go @@ -0,0 +1,139 @@ +package brokerauthentication + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&BrokerId{}) +} + +var _ resourceids.ResourceId = &BrokerId{} + +// BrokerId is a struct representing the Resource ID for a Broker +type BrokerId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string +} + +// NewBrokerID returns a new BrokerId struct +func NewBrokerID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string) BrokerId { + return BrokerId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + } +} + +// ParseBrokerID parses 'input' into a BrokerId +func ParseBrokerID(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseBrokerIDInsensitively parses 'input' case-insensitively into a BrokerId +// note: this method should only be used for API response data and not user input +func ParseBrokerIDInsensitively(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BrokerId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + return nil +} + +// ValidateBrokerID checks that 'input' can be parsed as a Broker ID +func ValidateBrokerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBrokerID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Broker ID +func (id BrokerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Broker ID +func (id BrokerId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + 
resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + } +} + +// String returns a human-readable description of this Broker ID +func (id BrokerId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + } + return fmt.Sprintf("Broker (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_createorupdate.go new file mode 100644 index 000000000000..a509567919d7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_createorupdate.go @@ -0,0 +1,75 @@ +package brokerauthentication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *BrokerAuthenticationResource +} + +// CreateOrUpdate ... 
+func (c BrokerAuthenticationClient) CreateOrUpdate(ctx context.Context, id AuthenticationId, input BrokerAuthenticationResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c BrokerAuthenticationClient) CreateOrUpdateThenPoll(ctx context.Context, id AuthenticationId, input BrokerAuthenticationResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_delete.go new file mode 100644 index 000000000000..37647e4f1c87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_delete.go @@ -0,0 +1,70 @@ +package brokerauthentication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c BrokerAuthenticationClient) Delete(ctx context.Context, id AuthenticationId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c BrokerAuthenticationClient) DeleteThenPoll(ctx context.Context, id AuthenticationId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_get.go new file mode 100644 index 000000000000..14e9936e9794 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_get.go 
@@ -0,0 +1,53 @@ +package brokerauthentication + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *BrokerAuthenticationResource +} + +// Get ... +func (c BrokerAuthenticationClient) Get(ctx context.Context, id AuthenticationId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model BrokerAuthenticationResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_listbyresourcegroup.go new file mode 100644 index 000000000000..4dcdb016025f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/method_listbyresourcegroup.go @@ -0,0 +1,105 @@ +package brokerauthentication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]BrokerAuthenticationResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []BrokerAuthenticationResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c BrokerAuthenticationClient) ListByResourceGroup(ctx context.Context, id BrokerId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/authentications", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]BrokerAuthenticationResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c BrokerAuthenticationClient) ListByResourceGroupComplete(ctx context.Context, id BrokerId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, BrokerAuthenticationResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c BrokerAuthenticationClient) 
ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id BrokerId, predicate BrokerAuthenticationResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]BrokerAuthenticationResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticationproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticationproperties.go new file mode 100644 index 000000000000..aee0efa08c54 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticationproperties.go @@ -0,0 +1,9 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerAuthenticationProperties struct { + AuthenticationMethods []BrokerAuthenticatorMethods `json:"authenticationMethods"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticationresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticationresource.go new file mode 100644 index 000000000000..7837b38fcad7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticationresource.go @@ -0,0 +1,17 @@ +package brokerauthentication + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthenticationResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *BrokerAuthenticationProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatorcustomauth.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatorcustomauth.go new file mode 100644 index 000000000000..5141450dd1ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatorcustomauth.go @@ -0,0 +1,8 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthenticatorCustomAuth struct { + X509 X509ManualCertificate `json:"x509"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodcustom.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodcustom.go new file mode 100644 index 000000000000..2c7c2303c869 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodcustom.go @@ -0,0 +1,11 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthenticatorMethodCustom struct { + Auth *BrokerAuthenticatorCustomAuth `json:"auth,omitempty"` + CaCertConfigMap *string `json:"caCertConfigMap,omitempty"` + Endpoint string `json:"endpoint"` + Headers *map[string]string `json:"headers,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethods.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethods.go new file mode 100644 index 000000000000..2de7907a992e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethods.go @@ -0,0 +1,11 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerAuthenticatorMethods struct { + CustomSettings *BrokerAuthenticatorMethodCustom `json:"customSettings,omitempty"` + Method BrokerAuthenticationMethod `json:"method"` + ServiceAccountTokenSettings *BrokerAuthenticatorMethodSat `json:"serviceAccountTokenSettings,omitempty"` + X509Settings *BrokerAuthenticatorMethodX509 `json:"x509Settings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodsat.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodsat.go new file mode 100644 index 000000000000..6def50a6cf63 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodsat.go @@ -0,0 +1,8 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthenticatorMethodSat struct { + Audiences []string `json:"audiences"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodx509.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodx509.go new file mode 100644 index 000000000000..3f63e8fb7d97 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodx509.go @@ -0,0 +1,9 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerAuthenticatorMethodX509 struct { + AuthorizationAttributes *map[string]BrokerAuthenticatorMethodX509Attributes `json:"authorizationAttributes,omitempty"` + TrustedClientCaCert *string `json:"trustedClientCaCert,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodx509attributes.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodx509attributes.go new file mode 100644 index 000000000000..36e16301ec8b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_brokerauthenticatormethodx509attributes.go @@ -0,0 +1,9 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthenticatorMethodX509Attributes struct { + Attributes map[string]string `json:"attributes"` + Subject string `json:"subject"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_extendedlocation.go new file mode 100644 index 000000000000..3c056b410f76 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_extendedlocation.go @@ -0,0 +1,9 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_x509manualcertificate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_x509manualcertificate.go new file mode 100644 index 000000000000..4675b0d4592d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/model_x509manualcertificate.go @@ -0,0 +1,8 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type X509ManualCertificate struct { + SecretRef string `json:"secretRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/predicates.go new file mode 100644 index 000000000000..cbccd2d3484b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/predicates.go @@ -0,0 +1,27 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerAuthenticationResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p BrokerAuthenticationResourceOperationPredicate) Matches(input BrokerAuthenticationResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/version.go new file mode 100644 index 000000000000..cdeb76a90115 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication/version.go @@ -0,0 +1,10 @@ +package brokerauthentication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/brokerauthentication/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/README.md new file mode 100644 index 000000000000..ee71092919f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization` Documentation + +The `brokerauthorization` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). 
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization" +``` + + +### Client Initialization + +```go +client := brokerauthorization.NewBrokerAuthorizationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BrokerAuthorizationClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := brokerauthorization.NewAuthorizationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "authorizationName") + +payload := brokerauthorization.BrokerAuthorizationResource{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerAuthorizationClient.Delete` + +```go +ctx := context.TODO() +id := brokerauthorization.NewAuthorizationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "authorizationName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerAuthorizationClient.Get` + +```go +ctx := context.TODO() +id := brokerauthorization.NewAuthorizationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "authorizationName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BrokerAuthorizationClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := brokerauthorization.NewBrokerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName") + +// alternatively 
`client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/client.go new file mode 100644 index 000000000000..24b131510e6c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/client.go @@ -0,0 +1,26 @@ +package brokerauthorization + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerAuthorizationClient struct { + Client *resourcemanager.Client +} + +func NewBrokerAuthorizationClientWithBaseURI(sdkApi sdkEnv.Api) (*BrokerAuthorizationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "brokerauthorization", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating BrokerAuthorizationClient: %+v", err) + } + + return &BrokerAuthorizationClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/constants.go new file mode 100644 index 000000000000..a5bcbdd4cbe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/constants.go @@ -0,0 +1,277 @@ +package brokerauthorization + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerResourceDefinitionMethods string + +const ( + BrokerResourceDefinitionMethodsConnect BrokerResourceDefinitionMethods = "Connect" + BrokerResourceDefinitionMethodsPublish BrokerResourceDefinitionMethods = "Publish" + BrokerResourceDefinitionMethodsSubscribe BrokerResourceDefinitionMethods = "Subscribe" +) + +func PossibleValuesForBrokerResourceDefinitionMethods() []string { + return []string{ + string(BrokerResourceDefinitionMethodsConnect), + string(BrokerResourceDefinitionMethodsPublish), + string(BrokerResourceDefinitionMethodsSubscribe), + } +} + +func (s *BrokerResourceDefinitionMethods) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBrokerResourceDefinitionMethods(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBrokerResourceDefinitionMethods(input string) (*BrokerResourceDefinitionMethods, error) { + vals := map[string]BrokerResourceDefinitionMethods{ + "connect": BrokerResourceDefinitionMethodsConnect, + "publish": BrokerResourceDefinitionMethodsPublish, + "subscribe": BrokerResourceDefinitionMethodsSubscribe, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BrokerResourceDefinitionMethods(input) + return &out, nil +} + +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + 
return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type OperationalMode string + +const ( + OperationalModeDisabled OperationalMode = "Disabled" + OperationalModeEnabled OperationalMode = "Enabled" +) + +func PossibleValuesForOperationalMode() []string { + return []string{ + string(OperationalModeDisabled), + string(OperationalModeEnabled), + } +} + +func (s *OperationalMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOperationalMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOperationalMode(input string) (*OperationalMode, error) { + vals := map[string]OperationalMode{ + "disabled": OperationalModeDisabled, + "enabled": OperationalModeEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OperationalMode(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() 
[]string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type StateStoreResourceDefinitionMethods string + +const ( + StateStoreResourceDefinitionMethodsRead StateStoreResourceDefinitionMethods = "Read" + StateStoreResourceDefinitionMethodsReadWrite StateStoreResourceDefinitionMethods = "ReadWrite" + StateStoreResourceDefinitionMethodsWrite StateStoreResourceDefinitionMethods = "Write" +) + +func PossibleValuesForStateStoreResourceDefinitionMethods() []string { + return []string{ + string(StateStoreResourceDefinitionMethodsRead), + string(StateStoreResourceDefinitionMethodsReadWrite), + string(StateStoreResourceDefinitionMethodsWrite), + } +} + +func (s *StateStoreResourceDefinitionMethods) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); 
err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseStateStoreResourceDefinitionMethods(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseStateStoreResourceDefinitionMethods(input string) (*StateStoreResourceDefinitionMethods, error) { + vals := map[string]StateStoreResourceDefinitionMethods{ + "read": StateStoreResourceDefinitionMethodsRead, + "readwrite": StateStoreResourceDefinitionMethodsReadWrite, + "write": StateStoreResourceDefinitionMethodsWrite, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := StateStoreResourceDefinitionMethods(input) + return &out, nil +} + +type StateStoreResourceKeyTypes string + +const ( + StateStoreResourceKeyTypesBinary StateStoreResourceKeyTypes = "Binary" + StateStoreResourceKeyTypesPattern StateStoreResourceKeyTypes = "Pattern" + StateStoreResourceKeyTypesString StateStoreResourceKeyTypes = "String" +) + +func PossibleValuesForStateStoreResourceKeyTypes() []string { + return []string{ + string(StateStoreResourceKeyTypesBinary), + string(StateStoreResourceKeyTypesPattern), + string(StateStoreResourceKeyTypesString), + } +} + +func (s *StateStoreResourceKeyTypes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseStateStoreResourceKeyTypes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseStateStoreResourceKeyTypes(input string) (*StateStoreResourceKeyTypes, error) { + vals := map[string]StateStoreResourceKeyTypes{ + "binary": StateStoreResourceKeyTypesBinary, + "pattern": StateStoreResourceKeyTypesPattern, + "string": StateStoreResourceKeyTypesString, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } 
+ + // otherwise presume it's an undefined value and best-effort it + out := StateStoreResourceKeyTypes(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/id_authorization.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/id_authorization.go new file mode 100644 index 000000000000..297452afa25c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/id_authorization.go @@ -0,0 +1,148 @@ +package brokerauthorization + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&AuthorizationId{}) +} + +var _ resourceids.ResourceId = &AuthorizationId{} + +// AuthorizationId is a struct representing the Resource ID for a Authorization +type AuthorizationId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string + AuthorizationName string +} + +// NewAuthorizationID returns a new AuthorizationId struct +func NewAuthorizationID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string, authorizationName string) AuthorizationId { + return AuthorizationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + AuthorizationName: authorizationName, + } +} + +// ParseAuthorizationID parses 'input' into a AuthorizationId +func ParseAuthorizationID(input string) (*AuthorizationId, error) { + parser := resourceids.NewParserFromResourceIdType(&AuthorizationId{}) + parsed, err := parser.Parse(input, 
false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AuthorizationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseAuthorizationIDInsensitively parses 'input' case-insensitively into a AuthorizationId +// note: this method should only be used for API response data and not user input +func ParseAuthorizationIDInsensitively(input string) (*AuthorizationId, error) { + parser := resourceids.NewParserFromResourceIdType(&AuthorizationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AuthorizationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *AuthorizationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + if id.AuthorizationName, ok = input.Parsed["authorizationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "authorizationName", input) + } + + return nil +} + +// ValidateAuthorizationID checks that 'input' can be parsed as a Authorization ID +func ValidateAuthorizationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := 
ParseAuthorizationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Authorization ID +func (id AuthorizationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s/authorizations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName, id.AuthorizationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Authorization ID +func (id AuthorizationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + resourceids.StaticSegment("staticAuthorizations", "authorizations", "authorizations"), + resourceids.UserSpecifiedSegment("authorizationName", "authorizationName"), + } +} + +// String returns a human-readable description of this Authorization ID +func (id AuthorizationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + 
fmt.Sprintf("Authorization Name: %q", id.AuthorizationName), + } + return fmt.Sprintf("Authorization (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/id_broker.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/id_broker.go new file mode 100644 index 000000000000..779a8335b63b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/id_broker.go @@ -0,0 +1,139 @@ +package brokerauthorization + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&BrokerId{}) +} + +var _ resourceids.ResourceId = &BrokerId{} + +// BrokerId is a struct representing the Resource ID for a Broker +type BrokerId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string +} + +// NewBrokerID returns a new BrokerId struct +func NewBrokerID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string) BrokerId { + return BrokerId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + } +} + +// ParseBrokerID parses 'input' into a BrokerId +func ParseBrokerID(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// 
ParseBrokerIDInsensitively parses 'input' case-insensitively into a BrokerId +// note: this method should only be used for API response data and not user input +func ParseBrokerIDInsensitively(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BrokerId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + return nil +} + +// ValidateBrokerID checks that 'input' can be parsed as a Broker ID +func ValidateBrokerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBrokerID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Broker ID +func (id BrokerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Broker 
ID +func (id BrokerId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + } +} + +// String returns a human-readable description of this Broker ID +func (id BrokerId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + } + return fmt.Sprintf("Broker (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_createorupdate.go new file mode 100644 index 000000000000..fae51653e8de --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_createorupdate.go @@ -0,0 +1,75 @@ +package brokerauthorization + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *BrokerAuthorizationResource +} + +// CreateOrUpdate ... +func (c BrokerAuthorizationClient) CreateOrUpdate(ctx context.Context, id AuthorizationId, input BrokerAuthorizationResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c BrokerAuthorizationClient) CreateOrUpdateThenPoll(ctx context.Context, id AuthorizationId, input BrokerAuthorizationResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_delete.go 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_delete.go new file mode 100644 index 000000000000..d1e02753e7e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_delete.go @@ -0,0 +1,70 @@ +package brokerauthorization + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c BrokerAuthorizationClient) Delete(ctx context.Context, id AuthorizationId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c BrokerAuthorizationClient) DeleteThenPoll(ctx context.Context, id AuthorizationId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + 
return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_get.go new file mode 100644 index 000000000000..796fa02e6023 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_get.go @@ -0,0 +1,53 @@ +package brokerauthorization + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *BrokerAuthorizationResource +} + +// Get ... 
+func (c BrokerAuthorizationClient) Get(ctx context.Context, id AuthorizationId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model BrokerAuthorizationResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_listbyresourcegroup.go new file mode 100644 index 000000000000..11b8edbe75c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/method_listbyresourcegroup.go @@ -0,0 +1,105 @@ +package brokerauthorization + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]BrokerAuthorizationResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []BrokerAuthorizationResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c BrokerAuthorizationClient) ListByResourceGroup(ctx context.Context, id BrokerId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/authorizations", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]BrokerAuthorizationResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c BrokerAuthorizationClient) ListByResourceGroupComplete(ctx context.Context, id BrokerId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, BrokerAuthorizationResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c BrokerAuthorizationClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id BrokerId, predicate 
BrokerAuthorizationResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]BrokerAuthorizationResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_authorizationconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_authorizationconfig.go new file mode 100644 index 000000000000..71282bcdc16c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_authorizationconfig.go @@ -0,0 +1,9 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthorizationConfig struct { + Cache *OperationalMode `json:"cache,omitempty"` + Rules *[]AuthorizationRule `json:"rules,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_authorizationrule.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_authorizationrule.go new file mode 100644 index 000000000000..3d7c13c2bebb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_authorizationrule.go @@ -0,0 +1,10 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthorizationRule struct { + BrokerResources []BrokerResourceRule `json:"brokerResources"` + Principals PrincipalDefinition `json:"principals"` + StateStoreResources *[]StateStoreResourceRule `json:"stateStoreResources,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerauthorizationproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerauthorizationproperties.go new file mode 100644 index 000000000000..322b53b6ace6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerauthorizationproperties.go @@ -0,0 +1,9 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthorizationProperties struct { + AuthorizationPolicies AuthorizationConfig `json:"authorizationPolicies"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerauthorizationresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerauthorizationresource.go new file mode 100644 index 000000000000..851e32e2ae2e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerauthorizationresource.go @@ -0,0 +1,17 @@ +package brokerauthorization + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type BrokerAuthorizationResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *BrokerAuthorizationProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerresourcerule.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerresourcerule.go new file mode 100644 index 000000000000..b4385d09f614 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_brokerresourcerule.go @@ -0,0 +1,10 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerResourceRule struct { + ClientIds *[]string `json:"clientIds,omitempty"` + Method BrokerResourceDefinitionMethods `json:"method"` + Topics *[]string `json:"topics,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_extendedlocation.go new file mode 100644 index 000000000000..318c427a8a07 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_extendedlocation.go @@ -0,0 +1,9 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_principaldefinition.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_principaldefinition.go new file mode 100644 index 000000000000..1ffde3958a8f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_principaldefinition.go @@ -0,0 +1,10 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PrincipalDefinition struct { + Attributes *[]map[string]string `json:"attributes,omitempty"` + ClientIds *[]string `json:"clientIds,omitempty"` + Usernames *[]string `json:"usernames,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_statestoreresourcerule.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_statestoreresourcerule.go new file mode 100644 index 000000000000..f134db87e697 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/model_statestoreresourcerule.go @@ -0,0 +1,10 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type StateStoreResourceRule struct { + KeyType StateStoreResourceKeyTypes `json:"keyType"` + Keys []string `json:"keys"` + Method StateStoreResourceDefinitionMethods `json:"method"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/predicates.go new file mode 100644 index 000000000000..e62e237db45e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/predicates.go @@ -0,0 +1,27 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerAuthorizationResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p BrokerAuthorizationResourceOperationPredicate) Matches(input BrokerAuthorizationResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/version.go new file mode 100644 index 000000000000..1714dd12c784 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization/version.go @@ -0,0 +1,10 @@ +package brokerauthorization + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/brokerauthorization/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/README.md new file mode 100644 index 000000000000..9739503bcea2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener` Documentation + +The `brokerlistener` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener" +``` + + +### Client Initialization + +```go +client := brokerlistener.NewBrokerListenerClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BrokerListenerClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := brokerlistener.NewListenerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "listenerName") + +payload := brokerlistener.BrokerListenerResource{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerListenerClient.Delete` + +```go +ctx := context.TODO() +id := brokerlistener.NewListenerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "listenerName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BrokerListenerClient.Get` + +```go +ctx := context.TODO() +id := brokerlistener.NewListenerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName", "listenerName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BrokerListenerClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := brokerlistener.NewBrokerID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "brokerName") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/client.go new file mode 100644 index 000000000000..533b54c46799 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/client.go @@ -0,0 +1,26 @@ +package brokerlistener + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerListenerClient struct { + Client *resourcemanager.Client +} + +func NewBrokerListenerClientWithBaseURI(sdkApi sdkEnv.Api) (*BrokerListenerClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "brokerlistener", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating BrokerListenerClient: %+v", err) + } + + return &BrokerListenerClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/constants.go new file mode 100644 index 000000000000..9b0d824386bd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/constants.go @@ -0,0 +1,368 @@ +package brokerlistener + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerProtocolType string + +const ( + BrokerProtocolTypeMqtt BrokerProtocolType = "Mqtt" + BrokerProtocolTypeWebSockets BrokerProtocolType = "WebSockets" +) + +func PossibleValuesForBrokerProtocolType() []string { + return []string{ + string(BrokerProtocolTypeMqtt), + string(BrokerProtocolTypeWebSockets), + } +} + +func (s *BrokerProtocolType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBrokerProtocolType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBrokerProtocolType(input string) (*BrokerProtocolType, error) { + vals := map[string]BrokerProtocolType{ + "mqtt": BrokerProtocolTypeMqtt, + "websockets": BrokerProtocolTypeWebSockets, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BrokerProtocolType(input) + return &out, nil +} + +type CertManagerIssuerKind string + +const ( + CertManagerIssuerKindClusterIssuer CertManagerIssuerKind = "ClusterIssuer" + CertManagerIssuerKindIssuer CertManagerIssuerKind = "Issuer" +) + +func PossibleValuesForCertManagerIssuerKind() []string { + return []string{ + string(CertManagerIssuerKindClusterIssuer), + string(CertManagerIssuerKindIssuer), + } +} + +func (s *CertManagerIssuerKind) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCertManagerIssuerKind(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCertManagerIssuerKind(input string) (*CertManagerIssuerKind, error) { + vals := map[string]CertManagerIssuerKind{ + "clusterissuer": CertManagerIssuerKindClusterIssuer, + "issuer": 
CertManagerIssuerKindIssuer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CertManagerIssuerKind(input) + return &out, nil +} + +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type PrivateKeyAlgorithm string + +const ( + PrivateKeyAlgorithmEcFiveTwoOne PrivateKeyAlgorithm = "Ec521" + PrivateKeyAlgorithmEcThreeEightFour PrivateKeyAlgorithm = "Ec384" + PrivateKeyAlgorithmEcTwoFiveSix PrivateKeyAlgorithm = "Ec256" + PrivateKeyAlgorithmEdTwoFiveFiveOneNine PrivateKeyAlgorithm = "Ed25519" + PrivateKeyAlgorithmRsaEightOneNineTwo PrivateKeyAlgorithm = "Rsa8192" + PrivateKeyAlgorithmRsaFourZeroNineSix PrivateKeyAlgorithm = "Rsa4096" + PrivateKeyAlgorithmRsaTwoZeroFourEight PrivateKeyAlgorithm = "Rsa2048" +) + +func PossibleValuesForPrivateKeyAlgorithm() []string { + return []string{ + string(PrivateKeyAlgorithmEcFiveTwoOne), + string(PrivateKeyAlgorithmEcThreeEightFour), + string(PrivateKeyAlgorithmEcTwoFiveSix), + 
string(PrivateKeyAlgorithmEdTwoFiveFiveOneNine), + string(PrivateKeyAlgorithmRsaEightOneNineTwo), + string(PrivateKeyAlgorithmRsaFourZeroNineSix), + string(PrivateKeyAlgorithmRsaTwoZeroFourEight), + } +} + +func (s *PrivateKeyAlgorithm) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePrivateKeyAlgorithm(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePrivateKeyAlgorithm(input string) (*PrivateKeyAlgorithm, error) { + vals := map[string]PrivateKeyAlgorithm{ + "ec521": PrivateKeyAlgorithmEcFiveTwoOne, + "ec384": PrivateKeyAlgorithmEcThreeEightFour, + "ec256": PrivateKeyAlgorithmEcTwoFiveSix, + "ed25519": PrivateKeyAlgorithmEdTwoFiveFiveOneNine, + "rsa8192": PrivateKeyAlgorithmRsaEightOneNineTwo, + "rsa4096": PrivateKeyAlgorithmRsaFourZeroNineSix, + "rsa2048": PrivateKeyAlgorithmRsaTwoZeroFourEight, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateKeyAlgorithm(input) + return &out, nil +} + +type PrivateKeyRotationPolicy string + +const ( + PrivateKeyRotationPolicyAlways PrivateKeyRotationPolicy = "Always" + PrivateKeyRotationPolicyNever PrivateKeyRotationPolicy = "Never" +) + +func PossibleValuesForPrivateKeyRotationPolicy() []string { + return []string{ + string(PrivateKeyRotationPolicyAlways), + string(PrivateKeyRotationPolicyNever), + } +} + +func (s *PrivateKeyRotationPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePrivateKeyRotationPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePrivateKeyRotationPolicy(input string) 
(*PrivateKeyRotationPolicy, error) { + vals := map[string]PrivateKeyRotationPolicy{ + "always": PrivateKeyRotationPolicyAlways, + "never": PrivateKeyRotationPolicyNever, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateKeyRotationPolicy(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type ServiceType string + +const ( + ServiceTypeClusterIP ServiceType = "ClusterIp" + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + ServiceTypeNodePort ServiceType = "NodePort" +) + +func PossibleValuesForServiceType() []string { + return []string{ + string(ServiceTypeClusterIP), + string(ServiceTypeLoadBalancer), + string(ServiceTypeNodePort), + } +} + +func (s *ServiceType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceType(input string) (*ServiceType, error) { + vals := map[string]ServiceType{ + "clusterip": ServiceTypeClusterIP, + "loadbalancer": ServiceTypeLoadBalancer, + "nodeport": ServiceTypeNodePort, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceType(input) + return &out, nil +} + +type TlsCertMethodMode string + +const ( + TlsCertMethodModeAutomatic TlsCertMethodMode = "Automatic" + TlsCertMethodModeManual TlsCertMethodMode = "Manual" +) + +func PossibleValuesForTlsCertMethodMode() []string { + return []string{ + string(TlsCertMethodModeAutomatic), + string(TlsCertMethodModeManual), + } +} + +func (s *TlsCertMethodMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTlsCertMethodMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTlsCertMethodMode(input string) (*TlsCertMethodMode, error) { + vals := 
map[string]TlsCertMethodMode{ + "automatic": TlsCertMethodModeAutomatic, + "manual": TlsCertMethodModeManual, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TlsCertMethodMode(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/id_broker.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/id_broker.go new file mode 100644 index 000000000000..314fa166a17f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/id_broker.go @@ -0,0 +1,139 @@ +package brokerlistener + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&BrokerId{}) +} + +var _ resourceids.ResourceId = &BrokerId{} + +// BrokerId is a struct representing the Resource ID for a Broker +type BrokerId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string +} + +// NewBrokerID returns a new BrokerId struct +func NewBrokerID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string) BrokerId { + return BrokerId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + } +} + +// ParseBrokerID parses 'input' into a BrokerId +func ParseBrokerID(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseBrokerIDInsensitively parses 'input' case-insensitively into a BrokerId +// note: this method should only be used for API response data and not user input +func ParseBrokerIDInsensitively(input string) (*BrokerId, error) { + parser := resourceids.NewParserFromResourceIdType(&BrokerId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BrokerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BrokerId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok 
= input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + return nil +} + +// ValidateBrokerID checks that 'input' can be parsed as a Broker ID +func ValidateBrokerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBrokerID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Broker ID +func (id BrokerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Broker ID +func (id BrokerId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + } +} + +// String returns a human-readable 
description of this Broker ID +func (id BrokerId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + } + return fmt.Sprintf("Broker (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/id_listener.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/id_listener.go new file mode 100644 index 000000000000..3f217b342ee8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/id_listener.go @@ -0,0 +1,148 @@ +package brokerlistener + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ListenerId{}) +} + +var _ resourceids.ResourceId = &ListenerId{} + +// ListenerId is a struct representing the Resource ID for a Listener +type ListenerId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + BrokerName string + ListenerName string +} + +// NewListenerID returns a new ListenerId struct +func NewListenerID(subscriptionId string, resourceGroupName string, instanceName string, brokerName string, listenerName string) ListenerId { + return ListenerId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + BrokerName: brokerName, + ListenerName: listenerName, + } +} + +// ParseListenerID parses 'input' into a ListenerId +func ParseListenerID(input string) (*ListenerId, error) { + parser := resourceids.NewParserFromResourceIdType(&ListenerId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ListenerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseListenerIDInsensitively parses 'input' case-insensitively into a ListenerId +// note: this method should only be used for API response data and not user input +func ParseListenerIDInsensitively(input string) (*ListenerId, error) { + parser := resourceids.NewParserFromResourceIdType(&ListenerId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ListenerId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ListenerId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = 
input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.BrokerName, ok = input.Parsed["brokerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "brokerName", input) + } + + if id.ListenerName, ok = input.Parsed["listenerName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "listenerName", input) + } + + return nil +} + +// ValidateListenerID checks that 'input' can be parsed as a Listener ID +func ValidateListenerID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseListenerID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Listener ID +func (id ListenerId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/brokers/%s/listeners/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.BrokerName, id.ListenerName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Listener ID +func (id ListenerId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", 
"Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticBrokers", "brokers", "brokers"), + resourceids.UserSpecifiedSegment("brokerName", "brokerName"), + resourceids.StaticSegment("staticListeners", "listeners", "listeners"), + resourceids.UserSpecifiedSegment("listenerName", "listenerName"), + } +} + +// String returns a human-readable description of this Listener ID +func (id ListenerId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Broker Name: %q", id.BrokerName), + fmt.Sprintf("Listener Name: %q", id.ListenerName), + } + return fmt.Sprintf("Listener (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_createorupdate.go new file mode 100644 index 000000000000..fb2f90da4339 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_createorupdate.go @@ -0,0 +1,75 @@ +package brokerlistener + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *BrokerListenerResource +} + +// CreateOrUpdate ... +func (c BrokerListenerClient) CreateOrUpdate(ctx context.Context, id ListenerId, input BrokerListenerResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c BrokerListenerClient) CreateOrUpdateThenPoll(ctx context.Context, id ListenerId, input BrokerListenerResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_delete.go new file mode 100644 index 000000000000..2680b2a4ec6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_delete.go @@ -0,0 +1,70 @@ +package brokerlistener + +import ( + "context" + "fmt" + 
"net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c BrokerListenerClient) Delete(ctx context.Context, id ListenerId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c BrokerListenerClient) DeleteThenPoll(ctx context.Context, id ListenerId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_get.go new file mode 100644 index 000000000000..50ca4819f716 --- /dev/null +++ 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_get.go @@ -0,0 +1,53 @@ +package brokerlistener + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *BrokerListenerResource +} + +// Get ... +func (c BrokerListenerClient) Get(ctx context.Context, id ListenerId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model BrokerListenerResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_listbyresourcegroup.go new file mode 100644 index 000000000000..8f0fc4afc214 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/method_listbyresourcegroup.go @@ -0,0 +1,105 @@ +package brokerlistener + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]BrokerListenerResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []BrokerListenerResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c BrokerListenerClient) ListByResourceGroup(ctx context.Context, id BrokerId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/listeners", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]BrokerListenerResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c BrokerListenerClient) ListByResourceGroupComplete(ctx context.Context, id BrokerId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, BrokerListenerResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c BrokerListenerClient) 
ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id BrokerId, predicate BrokerListenerResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]BrokerListenerResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_brokerlistenerproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_brokerlistenerproperties.go new file mode 100644 index 000000000000..3791620c70a3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_brokerlistenerproperties.go @@ -0,0 +1,11 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerListenerProperties struct { + Ports []ListenerPort `json:"ports"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` + ServiceType *ServiceType `json:"serviceType,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_brokerlistenerresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_brokerlistenerresource.go new file mode 100644 index 000000000000..bc0a71fd3711 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_brokerlistenerresource.go @@ -0,0 +1,17 @@ +package brokerlistener + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BrokerListenerResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *BrokerListenerProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagercertificatespec.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagercertificatespec.go new file mode 100644 index 000000000000..8cd97c4dc914 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagercertificatespec.go @@ -0,0 +1,13 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CertManagerCertificateSpec struct { + Duration *string `json:"duration,omitempty"` + IssuerRef CertManagerIssuerRef `json:"issuerRef"` + PrivateKey *CertManagerPrivateKey `json:"privateKey,omitempty"` + RenewBefore *string `json:"renewBefore,omitempty"` + San *SanForCert `json:"san,omitempty"` + SecretName *string `json:"secretName,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagerissuerref.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagerissuerref.go new file mode 100644 index 000000000000..b2915f0e160a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagerissuerref.go @@ -0,0 +1,10 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CertManagerIssuerRef struct { + Group string `json:"group"` + Kind CertManagerIssuerKind `json:"kind"` + Name string `json:"name"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagerprivatekey.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagerprivatekey.go new file mode 100644 index 000000000000..49421674dc85 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_certmanagerprivatekey.go @@ -0,0 +1,9 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CertManagerPrivateKey struct { + Algorithm PrivateKeyAlgorithm `json:"algorithm"` + RotationPolicy PrivateKeyRotationPolicy `json:"rotationPolicy"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_extendedlocation.go new file mode 100644 index 000000000000..d51c17d81ad2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_extendedlocation.go @@ -0,0 +1,9 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_listenerport.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_listenerport.go new file mode 100644 index 000000000000..9d92b294717e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_listenerport.go @@ -0,0 +1,13 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListenerPort struct { + AuthenticationRef *string `json:"authenticationRef,omitempty"` + AuthorizationRef *string `json:"authorizationRef,omitempty"` + NodePort *int64 `json:"nodePort,omitempty"` + Port int64 `json:"port"` + Protocol *BrokerProtocolType `json:"protocol,omitempty"` + Tls *TlsCertMethod `json:"tls,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_sanforcert.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_sanforcert.go new file mode 100644 index 000000000000..b44f57f6b4be --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_sanforcert.go @@ -0,0 +1,9 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SanForCert struct { + Dns []string `json:"dns"` + IP []string `json:"ip"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_tlscertmethod.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_tlscertmethod.go new file mode 100644 index 000000000000..587b4f52f22e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_tlscertmethod.go @@ -0,0 +1,10 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TlsCertMethod struct { + CertManagerCertificateSpec *CertManagerCertificateSpec `json:"certManagerCertificateSpec,omitempty"` + Manual *X509ManualCertificate `json:"manual,omitempty"` + Mode TlsCertMethodMode `json:"mode"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_x509manualcertificate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_x509manualcertificate.go new file mode 100644 index 000000000000..95c614eb8552 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/model_x509manualcertificate.go @@ -0,0 +1,8 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type X509ManualCertificate struct { + SecretRef string `json:"secretRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/predicates.go new file mode 100644 index 000000000000..7686d443c421 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/predicates.go @@ -0,0 +1,27 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BrokerListenerResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p BrokerListenerResourceOperationPredicate) Matches(input BrokerListenerResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/version.go new file mode 100644 index 000000000000..078fb544189a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener/version.go @@ -0,0 +1,10 @@ +package brokerlistener + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/brokerlistener/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/README.md new file mode 100644 index 000000000000..89a3d6d6118f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow` Documentation + +The `dataflow` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). 
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow" +``` + + +### Client Initialization + +```go +client := dataflow.NewDataflowClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `DataflowClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := dataflow.NewDataflowID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName", "dataflowName") + +payload := dataflow.DataflowResource{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DataflowClient.Delete` + +```go +ctx := context.TODO() +id := dataflow.NewDataflowID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName", "dataflowName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `DataflowClient.Get` + +```go +ctx := context.TODO() +id := dataflow.NewDataflowID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName", "dataflowName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DataflowClient.ListByProfileResource` + +```go +ctx := context.TODO() +id := dataflow.NewDataflowProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName") + +// alternatively `client.ListByProfileResource(ctx, id)` can be used to do batched pagination +items, err := client.ListByProfileResourceComplete(ctx, id) +if err != nil { 
+ // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/client.go new file mode 100644 index 000000000000..3df6f0aa3d70 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/client.go @@ -0,0 +1,26 @@ +package dataflow + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowClient struct { + Client *resourcemanager.Client +} + +func NewDataflowClientWithBaseURI(sdkApi sdkEnv.Api) (*DataflowClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "dataflow", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating DataflowClient: %+v", err) + } + + return &DataflowClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/constants.go new file mode 100644 index 000000000000..322bdb8e315f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/constants.go @@ -0,0 +1,359 @@ +package dataflow + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowMappingType string + +const ( + DataflowMappingTypeBuiltInFunction DataflowMappingType = "BuiltInFunction" + DataflowMappingTypeCompute DataflowMappingType = "Compute" + DataflowMappingTypeNewProperties DataflowMappingType = "NewProperties" + DataflowMappingTypePassThrough DataflowMappingType = "PassThrough" + DataflowMappingTypeRename DataflowMappingType = "Rename" +) + +func PossibleValuesForDataflowMappingType() []string { + return []string{ + string(DataflowMappingTypeBuiltInFunction), + string(DataflowMappingTypeCompute), + string(DataflowMappingTypeNewProperties), + string(DataflowMappingTypePassThrough), + string(DataflowMappingTypeRename), + } +} + +func (s *DataflowMappingType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataflowMappingType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataflowMappingType(input string) (*DataflowMappingType, error) { + vals := map[string]DataflowMappingType{ + "builtinfunction": DataflowMappingTypeBuiltInFunction, + "compute": DataflowMappingTypeCompute, + "newproperties": DataflowMappingTypeNewProperties, + "passthrough": DataflowMappingTypePassThrough, + "rename": DataflowMappingTypeRename, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataflowMappingType(input) + return &out, nil +} + +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + 
return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type FilterType string + +const ( + FilterTypeFilter FilterType = "Filter" +) + +func PossibleValuesForFilterType() []string { + return []string{ + string(FilterTypeFilter), + } +} + +func (s *FilterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseFilterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseFilterType(input string) (*FilterType, error) { + vals := map[string]FilterType{ + "filter": FilterTypeFilter, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := FilterType(input) + return &out, nil +} + +type OperationType string + +const ( + OperationTypeBuiltInTransformation OperationType = "BuiltInTransformation" + OperationTypeDestination OperationType = "Destination" + OperationTypeSource OperationType = "Source" +) + +func PossibleValuesForOperationType() []string { + return []string{ + string(OperationTypeBuiltInTransformation), + string(OperationTypeDestination), + string(OperationTypeSource), + } +} + +func (s *OperationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOperationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOperationType(input string) (*OperationType, error) { + vals := map[string]OperationType{ + "builtintransformation": OperationTypeBuiltInTransformation, + "destination": OperationTypeDestination, + "source": OperationTypeSource, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OperationType(input) + return &out, nil +} + +type OperationalMode string + +const ( + OperationalModeDisabled OperationalMode = "Disabled" + OperationalModeEnabled OperationalMode = "Enabled" +) + +func PossibleValuesForOperationalMode() []string { + return []string{ + string(OperationalModeDisabled), + string(OperationalModeEnabled), + } +} + +func (s *OperationalMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOperationalMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOperationalMode(input string) (*OperationalMode, error) { + vals := map[string]OperationalMode{ + "disabled": OperationalModeDisabled, + "enabled": OperationalModeEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OperationalMode(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + 
ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type SourceSerializationFormat string + +const ( + SourceSerializationFormatJson SourceSerializationFormat = "Json" +) + +func PossibleValuesForSourceSerializationFormat() []string { + return []string{ + string(SourceSerializationFormatJson), + } +} + +func (s *SourceSerializationFormat) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSourceSerializationFormat(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + 
} + *s = *out + return nil +} + +func parseSourceSerializationFormat(input string) (*SourceSerializationFormat, error) { + vals := map[string]SourceSerializationFormat{ + "json": SourceSerializationFormatJson, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SourceSerializationFormat(input) + return &out, nil +} + +type TransformationSerializationFormat string + +const ( + TransformationSerializationFormatDelta TransformationSerializationFormat = "Delta" + TransformationSerializationFormatJson TransformationSerializationFormat = "Json" + TransformationSerializationFormatParquet TransformationSerializationFormat = "Parquet" +) + +func PossibleValuesForTransformationSerializationFormat() []string { + return []string{ + string(TransformationSerializationFormatDelta), + string(TransformationSerializationFormatJson), + string(TransformationSerializationFormatParquet), + } +} + +func (s *TransformationSerializationFormat) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTransformationSerializationFormat(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTransformationSerializationFormat(input string) (*TransformationSerializationFormat, error) { + vals := map[string]TransformationSerializationFormat{ + "delta": TransformationSerializationFormatDelta, + "json": TransformationSerializationFormatJson, + "parquet": TransformationSerializationFormatParquet, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TransformationSerializationFormat(input) + return &out, nil +} diff --git 
a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/id_dataflow.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/id_dataflow.go new file mode 100644 index 000000000000..4330978de22e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/id_dataflow.go @@ -0,0 +1,148 @@ +package dataflow + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&DataflowId{}) +} + +var _ resourceids.ResourceId = &DataflowId{} + +// DataflowId is a struct representing the Resource ID for a Dataflow +type DataflowId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + DataflowProfileName string + DataflowName string +} + +// NewDataflowID returns a new DataflowId struct +func NewDataflowID(subscriptionId string, resourceGroupName string, instanceName string, dataflowProfileName string, dataflowName string) DataflowId { + return DataflowId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + DataflowProfileName: dataflowProfileName, + DataflowName: dataflowName, + } +} + +// ParseDataflowID parses 'input' into a DataflowId +func ParseDataflowID(input string) (*DataflowId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseDataflowIDInsensitively parses 'input' 
case-insensitively into a DataflowId +// note: this method should only be used for API response data and not user input +func ParseDataflowIDInsensitively(input string) (*DataflowId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *DataflowId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.DataflowProfileName, ok = input.Parsed["dataflowProfileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "dataflowProfileName", input) + } + + if id.DataflowName, ok = input.Parsed["dataflowName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "dataflowName", input) + } + + return nil +} + +// ValidateDataflowID checks that 'input' can be parsed as a Dataflow ID +func ValidateDataflowID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseDataflowID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Dataflow ID +func (id DataflowId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/dataflowProfiles/%s/dataflows/%s" + return 
fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.DataflowProfileName, id.DataflowName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Dataflow ID +func (id DataflowId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticDataflowProfiles", "dataflowProfiles", "dataflowProfiles"), + resourceids.UserSpecifiedSegment("dataflowProfileName", "dataflowProfileName"), + resourceids.StaticSegment("staticDataflows", "dataflows", "dataflows"), + resourceids.UserSpecifiedSegment("dataflowName", "dataflowName"), + } +} + +// String returns a human-readable description of this Dataflow ID +func (id DataflowId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Dataflow Profile Name: %q", id.DataflowProfileName), + fmt.Sprintf("Dataflow Name: %q", id.DataflowName), + } + return fmt.Sprintf("Dataflow (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/id_dataflowprofile.go 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/id_dataflowprofile.go new file mode 100644 index 000000000000..d1ca6ed1ea59 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/id_dataflowprofile.go @@ -0,0 +1,139 @@ +package dataflow + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&DataflowProfileId{}) +} + +var _ resourceids.ResourceId = &DataflowProfileId{} + +// DataflowProfileId is a struct representing the Resource ID for a Dataflow Profile +type DataflowProfileId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + DataflowProfileName string +} + +// NewDataflowProfileID returns a new DataflowProfileId struct +func NewDataflowProfileID(subscriptionId string, resourceGroupName string, instanceName string, dataflowProfileName string) DataflowProfileId { + return DataflowProfileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + DataflowProfileName: dataflowProfileName, + } +} + +// ParseDataflowProfileID parses 'input' into a DataflowProfileId +func ParseDataflowProfileID(input string) (*DataflowProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowProfileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseDataflowProfileIDInsensitively parses 'input' case-insensitively into a DataflowProfileId +// note: 
this method should only be used for API response data and not user input +func ParseDataflowProfileIDInsensitively(input string) (*DataflowProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowProfileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *DataflowProfileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.DataflowProfileName, ok = input.Parsed["dataflowProfileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "dataflowProfileName", input) + } + + return nil +} + +// ValidateDataflowProfileID checks that 'input' can be parsed as a Dataflow Profile ID +func ValidateDataflowProfileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseDataflowProfileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Dataflow Profile ID +func (id DataflowProfileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/dataflowProfiles/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.DataflowProfileName) +} + +// Segments returns a 
slice of Resource ID Segments which comprise this Dataflow Profile ID +func (id DataflowProfileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticDataflowProfiles", "dataflowProfiles", "dataflowProfiles"), + resourceids.UserSpecifiedSegment("dataflowProfileName", "dataflowProfileName"), + } +} + +// String returns a human-readable description of this Dataflow Profile ID +func (id DataflowProfileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Dataflow Profile Name: %q", id.DataflowProfileName), + } + return fmt.Sprintf("Dataflow Profile (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_createorupdate.go new file mode 100644 index 000000000000..6384339e05c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_createorupdate.go @@ -0,0 +1,75 @@ +package dataflow + 
+import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataflowResource +} + +// CreateOrUpdate ... +func (c DataflowClient) CreateOrUpdate(ctx context.Context, id DataflowId, input DataflowResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c DataflowClient) CreateOrUpdateThenPoll(ctx context.Context, id DataflowId, input DataflowResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git 
a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_delete.go new file mode 100644 index 000000000000..52e31993717d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_delete.go @@ -0,0 +1,70 @@ +package dataflow + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... 
+func (c DataflowClient) Delete(ctx context.Context, id DataflowId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c DataflowClient) DeleteThenPoll(ctx context.Context, id DataflowId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_get.go new file mode 100644 index 000000000000..0e1a19308b1c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_get.go @@ -0,0 +1,53 @@ +package dataflow + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataflowResource +} + +// Get ... 
+func (c DataflowClient) Get(ctx context.Context, id DataflowId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataflowResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_listbyprofileresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_listbyprofileresource.go new file mode 100644 index 000000000000..7daddedf109b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/method_listbyprofileresource.go @@ -0,0 +1,105 @@ +package dataflow + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByProfileResourceOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataflowResource +} + +type ListByProfileResourceCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataflowResource +} + +type ListByProfileResourceCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByProfileResourceCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByProfileResource ... +func (c DataflowClient) ListByProfileResource(ctx context.Context, id DataflowProfileId) (result ListByProfileResourceOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByProfileResourceCustomPager{}, + Path: fmt.Sprintf("%s/dataflows", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataflowResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByProfileResourceComplete retrieves all the results into a single object +func (c DataflowClient) ListByProfileResourceComplete(ctx context.Context, id DataflowProfileId) (ListByProfileResourceCompleteResult, error) { + return c.ListByProfileResourceCompleteMatchingPredicate(ctx, id, DataflowResourceOperationPredicate{}) +} + +// ListByProfileResourceCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c DataflowClient) ListByProfileResourceCompleteMatchingPredicate(ctx context.Context, id DataflowProfileId, predicate DataflowResourceOperationPredicate) (result 
ListByProfileResourceCompleteResult, err error) { + items := make([]DataflowResource, 0) + + resp, err := c.ListByProfileResource(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByProfileResourceCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationdataset.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationdataset.go new file mode 100644 index 000000000000..399fd7967ae8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationdataset.go @@ -0,0 +1,12 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowBuiltInTransformationDataset struct { + Description *string `json:"description,omitempty"` + Expression *string `json:"expression,omitempty"` + Inputs []string `json:"inputs"` + Key string `json:"key"` + SchemaRef *string `json:"schemaRef,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationfilter.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationfilter.go new file mode 100644 index 000000000000..a0b1d275b9cd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationfilter.go @@ -0,0 +1,11 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowBuiltInTransformationFilter struct { + Description *string `json:"description,omitempty"` + Expression string `json:"expression"` + Inputs []string `json:"inputs"` + Type *FilterType `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationmap.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationmap.go new file mode 100644 index 000000000000..f20fae7c495a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationmap.go @@ -0,0 +1,12 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowBuiltInTransformationMap struct { + Description *string `json:"description,omitempty"` + Expression *string `json:"expression,omitempty"` + Inputs []string `json:"inputs"` + Output string `json:"output"` + Type *DataflowMappingType `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationsettings.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationsettings.go new file mode 100644 index 000000000000..4fbb0c986ffe --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowbuiltintransformationsettings.go @@ -0,0 +1,12 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowBuiltInTransformationSettings struct { + Datasets *[]DataflowBuiltInTransformationDataset `json:"datasets,omitempty"` + Filter *[]DataflowBuiltInTransformationFilter `json:"filter,omitempty"` + Map *[]DataflowBuiltInTransformationMap `json:"map,omitempty"` + SchemaRef *string `json:"schemaRef,omitempty"` + SerializationFormat *TransformationSerializationFormat `json:"serializationFormat,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowdestinationoperationsettings.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowdestinationoperationsettings.go new file mode 100644 index 000000000000..a2c6697ce795 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowdestinationoperationsettings.go @@ -0,0 +1,9 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowDestinationOperationSettings struct { + DataDestination string `json:"dataDestination"` + EndpointRef string `json:"endpointRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowoperation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowoperation.go new file mode 100644 index 000000000000..d3e4d06dbd24 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowoperation.go @@ -0,0 +1,12 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowOperation struct { + BuiltInTransformationSettings *DataflowBuiltInTransformationSettings `json:"builtInTransformationSettings,omitempty"` + DestinationSettings *DataflowDestinationOperationSettings `json:"destinationSettings,omitempty"` + Name *string `json:"name,omitempty"` + OperationType OperationType `json:"operationType"` + SourceSettings *DataflowSourceOperationSettings `json:"sourceSettings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowproperties.go new file mode 100644 index 000000000000..f6b4844f0bbd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowproperties.go @@ -0,0 +1,10 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowProperties struct { + Mode *OperationalMode `json:"mode,omitempty"` + Operations []DataflowOperation `json:"operations"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowresource.go new file mode 100644 index 000000000000..e200aa5852a0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowresource.go @@ -0,0 +1,17 @@ +package dataflow + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataflowProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowsourceoperationsettings.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowsourceoperationsettings.go new file mode 100644 index 000000000000..b1d5ee254679 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_dataflowsourceoperationsettings.go @@ -0,0 +1,12 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowSourceOperationSettings struct { + AssetRef *string `json:"assetRef,omitempty"` + DataSources []string `json:"dataSources"` + EndpointRef string `json:"endpointRef"` + SchemaRef *string `json:"schemaRef,omitempty"` + SerializationFormat *SourceSerializationFormat `json:"serializationFormat,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_extendedlocation.go new file mode 100644 index 000000000000..f269f27be83c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/model_extendedlocation.go @@ -0,0 +1,9 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/predicates.go new file mode 100644 index 000000000000..3b06a38c088c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/predicates.go @@ -0,0 +1,27 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p DataflowResourceOperationPredicate) Matches(input DataflowResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/version.go new file mode 100644 index 000000000000..fb50c2b81bbd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow/version.go @@ -0,0 +1,10 @@ +package dataflow + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/dataflow/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/README.md new file mode 100644 index 000000000000..bbbd524eb9ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint` Documentation + +The `dataflowendpoint` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). 
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint" +``` + + +### Client Initialization + +```go +client := dataflowendpoint.NewDataflowEndpointClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `DataflowEndpointClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := dataflowendpoint.NewDataflowEndpointID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowEndpointName") + +payload := dataflowendpoint.DataflowEndpointResource{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DataflowEndpointClient.Delete` + +```go +ctx := context.TODO() +id := dataflowendpoint.NewDataflowEndpointID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowEndpointName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `DataflowEndpointClient.Get` + +```go +ctx := context.TODO() +id := dataflowendpoint.NewDataflowEndpointID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowEndpointName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DataflowEndpointClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := dataflowendpoint.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := 
client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/client.go new file mode 100644 index 000000000000..2f129912e382 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/client.go @@ -0,0 +1,26 @@ +package dataflowendpoint + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointClient struct { + Client *resourcemanager.Client +} + +func NewDataflowEndpointClientWithBaseURI(sdkApi sdkEnv.Api) (*DataflowEndpointClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "dataflowendpoint", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating DataflowEndpointClient: %+v", err) + } + + return &DataflowEndpointClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/constants.go new file mode 100644 index 000000000000..2c10f076e664 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/constants.go @@ -0,0 +1,729 @@ +package dataflowendpoint + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type BrokerProtocolType string + +const ( + BrokerProtocolTypeMqtt BrokerProtocolType = "Mqtt" + BrokerProtocolTypeWebSockets BrokerProtocolType = "WebSockets" +) + +func PossibleValuesForBrokerProtocolType() []string { + return []string{ + string(BrokerProtocolTypeMqtt), + string(BrokerProtocolTypeWebSockets), + } +} + +func (s *BrokerProtocolType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBrokerProtocolType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBrokerProtocolType(input string) (*BrokerProtocolType, error) { + vals := map[string]BrokerProtocolType{ + "mqtt": BrokerProtocolTypeMqtt, + "websockets": BrokerProtocolTypeWebSockets, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BrokerProtocolType(input) + return &out, nil +} + +type CloudEventAttributeType string + +const ( + CloudEventAttributeTypeCreateOrRemap CloudEventAttributeType = "CreateOrRemap" + CloudEventAttributeTypePropagate CloudEventAttributeType = "Propagate" +) + +func PossibleValuesForCloudEventAttributeType() []string { + return []string{ + string(CloudEventAttributeTypeCreateOrRemap), + string(CloudEventAttributeTypePropagate), + } +} + +func (s *CloudEventAttributeType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCloudEventAttributeType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCloudEventAttributeType(input string) (*CloudEventAttributeType, error) { + vals := 
map[string]CloudEventAttributeType{ + "createorremap": CloudEventAttributeTypeCreateOrRemap, + "propagate": CloudEventAttributeTypePropagate, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CloudEventAttributeType(input) + return &out, nil +} + +type DataLakeStorageAuthMethod string + +const ( + DataLakeStorageAuthMethodAccessToken DataLakeStorageAuthMethod = "AccessToken" + DataLakeStorageAuthMethodSystemAssignedManagedIdentity DataLakeStorageAuthMethod = "SystemAssignedManagedIdentity" + DataLakeStorageAuthMethodUserAssignedManagedIdentity DataLakeStorageAuthMethod = "UserAssignedManagedIdentity" +) + +func PossibleValuesForDataLakeStorageAuthMethod() []string { + return []string{ + string(DataLakeStorageAuthMethodAccessToken), + string(DataLakeStorageAuthMethodSystemAssignedManagedIdentity), + string(DataLakeStorageAuthMethodUserAssignedManagedIdentity), + } +} + +func (s *DataLakeStorageAuthMethod) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataLakeStorageAuthMethod(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataLakeStorageAuthMethod(input string) (*DataLakeStorageAuthMethod, error) { + vals := map[string]DataLakeStorageAuthMethod{ + "accesstoken": DataLakeStorageAuthMethodAccessToken, + "systemassignedmanagedidentity": DataLakeStorageAuthMethodSystemAssignedManagedIdentity, + "userassignedmanagedidentity": DataLakeStorageAuthMethodUserAssignedManagedIdentity, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataLakeStorageAuthMethod(input) + return &out, nil +} + +type DataflowEndpointAuthenticationSaslType string + +const ( + 
DataflowEndpointAuthenticationSaslTypePlain DataflowEndpointAuthenticationSaslType = "Plain" + DataflowEndpointAuthenticationSaslTypeScramShaFiveOneTwo DataflowEndpointAuthenticationSaslType = "ScramSha512" + DataflowEndpointAuthenticationSaslTypeScramShaTwoFiveSix DataflowEndpointAuthenticationSaslType = "ScramSha256" +) + +func PossibleValuesForDataflowEndpointAuthenticationSaslType() []string { + return []string{ + string(DataflowEndpointAuthenticationSaslTypePlain), + string(DataflowEndpointAuthenticationSaslTypeScramShaFiveOneTwo), + string(DataflowEndpointAuthenticationSaslTypeScramShaTwoFiveSix), + } +} + +func (s *DataflowEndpointAuthenticationSaslType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataflowEndpointAuthenticationSaslType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataflowEndpointAuthenticationSaslType(input string) (*DataflowEndpointAuthenticationSaslType, error) { + vals := map[string]DataflowEndpointAuthenticationSaslType{ + "plain": DataflowEndpointAuthenticationSaslTypePlain, + "scramsha512": DataflowEndpointAuthenticationSaslTypeScramShaFiveOneTwo, + "scramsha256": DataflowEndpointAuthenticationSaslTypeScramShaTwoFiveSix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataflowEndpointAuthenticationSaslType(input) + return &out, nil +} + +type DataflowEndpointFabricPathType string + +const ( + DataflowEndpointFabricPathTypeFiles DataflowEndpointFabricPathType = "Files" + DataflowEndpointFabricPathTypeTables DataflowEndpointFabricPathType = "Tables" +) + +func PossibleValuesForDataflowEndpointFabricPathType() []string { + return []string{ + string(DataflowEndpointFabricPathTypeFiles), + 
string(DataflowEndpointFabricPathTypeTables), + } +} + +func (s *DataflowEndpointFabricPathType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataflowEndpointFabricPathType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataflowEndpointFabricPathType(input string) (*DataflowEndpointFabricPathType, error) { + vals := map[string]DataflowEndpointFabricPathType{ + "files": DataflowEndpointFabricPathTypeFiles, + "tables": DataflowEndpointFabricPathTypeTables, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataflowEndpointFabricPathType(input) + return &out, nil +} + +type DataflowEndpointKafkaAcks string + +const ( + DataflowEndpointKafkaAcksAll DataflowEndpointKafkaAcks = "All" + DataflowEndpointKafkaAcksOne DataflowEndpointKafkaAcks = "One" + DataflowEndpointKafkaAcksZero DataflowEndpointKafkaAcks = "Zero" +) + +func PossibleValuesForDataflowEndpointKafkaAcks() []string { + return []string{ + string(DataflowEndpointKafkaAcksAll), + string(DataflowEndpointKafkaAcksOne), + string(DataflowEndpointKafkaAcksZero), + } +} + +func (s *DataflowEndpointKafkaAcks) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataflowEndpointKafkaAcks(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataflowEndpointKafkaAcks(input string) (*DataflowEndpointKafkaAcks, error) { + vals := map[string]DataflowEndpointKafkaAcks{ + "all": DataflowEndpointKafkaAcksAll, + "one": DataflowEndpointKafkaAcksOne, + "zero": DataflowEndpointKafkaAcksZero, + } + if v, ok := 
vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataflowEndpointKafkaAcks(input) + return &out, nil +} + +type DataflowEndpointKafkaCompression string + +const ( + DataflowEndpointKafkaCompressionGzip DataflowEndpointKafkaCompression = "Gzip" + DataflowEndpointKafkaCompressionLzFour DataflowEndpointKafkaCompression = "Lz4" + DataflowEndpointKafkaCompressionNone DataflowEndpointKafkaCompression = "None" + DataflowEndpointKafkaCompressionSnappy DataflowEndpointKafkaCompression = "Snappy" +) + +func PossibleValuesForDataflowEndpointKafkaCompression() []string { + return []string{ + string(DataflowEndpointKafkaCompressionGzip), + string(DataflowEndpointKafkaCompressionLzFour), + string(DataflowEndpointKafkaCompressionNone), + string(DataflowEndpointKafkaCompressionSnappy), + } +} + +func (s *DataflowEndpointKafkaCompression) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataflowEndpointKafkaCompression(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataflowEndpointKafkaCompression(input string) (*DataflowEndpointKafkaCompression, error) { + vals := map[string]DataflowEndpointKafkaCompression{ + "gzip": DataflowEndpointKafkaCompressionGzip, + "lz4": DataflowEndpointKafkaCompressionLzFour, + "none": DataflowEndpointKafkaCompressionNone, + "snappy": DataflowEndpointKafkaCompressionSnappy, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataflowEndpointKafkaCompression(input) + return &out, nil +} + +type DataflowEndpointKafkaPartitionStrategy string + +const ( + DataflowEndpointKafkaPartitionStrategyDefault DataflowEndpointKafkaPartitionStrategy = "Default" + 
DataflowEndpointKafkaPartitionStrategyProperty DataflowEndpointKafkaPartitionStrategy = "Property" + DataflowEndpointKafkaPartitionStrategyStatic DataflowEndpointKafkaPartitionStrategy = "Static" + DataflowEndpointKafkaPartitionStrategyTopic DataflowEndpointKafkaPartitionStrategy = "Topic" +) + +func PossibleValuesForDataflowEndpointKafkaPartitionStrategy() []string { + return []string{ + string(DataflowEndpointKafkaPartitionStrategyDefault), + string(DataflowEndpointKafkaPartitionStrategyProperty), + string(DataflowEndpointKafkaPartitionStrategyStatic), + string(DataflowEndpointKafkaPartitionStrategyTopic), + } +} + +func (s *DataflowEndpointKafkaPartitionStrategy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDataflowEndpointKafkaPartitionStrategy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDataflowEndpointKafkaPartitionStrategy(input string) (*DataflowEndpointKafkaPartitionStrategy, error) { + vals := map[string]DataflowEndpointKafkaPartitionStrategy{ + "default": DataflowEndpointKafkaPartitionStrategyDefault, + "property": DataflowEndpointKafkaPartitionStrategyProperty, + "static": DataflowEndpointKafkaPartitionStrategyStatic, + "topic": DataflowEndpointKafkaPartitionStrategyTopic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DataflowEndpointKafkaPartitionStrategy(input) + return &out, nil +} + +type EndpointType string + +const ( + EndpointTypeDataExplorer EndpointType = "DataExplorer" + EndpointTypeDataLakeStorage EndpointType = "DataLakeStorage" + EndpointTypeFabricOneLake EndpointType = "FabricOneLake" + EndpointTypeKafka EndpointType = "Kafka" + EndpointTypeLocalStorage EndpointType = "LocalStorage" + EndpointTypeMqtt EndpointType = 
"Mqtt" +) + +func PossibleValuesForEndpointType() []string { + return []string{ + string(EndpointTypeDataExplorer), + string(EndpointTypeDataLakeStorage), + string(EndpointTypeFabricOneLake), + string(EndpointTypeKafka), + string(EndpointTypeLocalStorage), + string(EndpointTypeMqtt), + } +} + +func (s *EndpointType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEndpointType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEndpointType(input string) (*EndpointType, error) { + vals := map[string]EndpointType{ + "dataexplorer": EndpointTypeDataExplorer, + "datalakestorage": EndpointTypeDataLakeStorage, + "fabriconelake": EndpointTypeFabricOneLake, + "kafka": EndpointTypeKafka, + "localstorage": EndpointTypeLocalStorage, + "mqtt": EndpointTypeMqtt, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EndpointType(input) + return &out, nil +} + +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; 
ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type KafkaAuthMethod string + +const ( + KafkaAuthMethodAnonymous KafkaAuthMethod = "Anonymous" + KafkaAuthMethodSasl KafkaAuthMethod = "Sasl" + KafkaAuthMethodSystemAssignedManagedIdentity KafkaAuthMethod = "SystemAssignedManagedIdentity" + KafkaAuthMethodUserAssignedManagedIdentity KafkaAuthMethod = "UserAssignedManagedIdentity" + KafkaAuthMethodXFiveZeroNineCertificate KafkaAuthMethod = "X509Certificate" +) + +func PossibleValuesForKafkaAuthMethod() []string { + return []string{ + string(KafkaAuthMethodAnonymous), + string(KafkaAuthMethodSasl), + string(KafkaAuthMethodSystemAssignedManagedIdentity), + string(KafkaAuthMethodUserAssignedManagedIdentity), + string(KafkaAuthMethodXFiveZeroNineCertificate), + } +} + +func (s *KafkaAuthMethod) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKafkaAuthMethod(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKafkaAuthMethod(input string) (*KafkaAuthMethod, error) { + vals := map[string]KafkaAuthMethod{ + "anonymous": KafkaAuthMethodAnonymous, + "sasl": KafkaAuthMethodSasl, + "systemassignedmanagedidentity": KafkaAuthMethodSystemAssignedManagedIdentity, + "userassignedmanagedidentity": KafkaAuthMethodUserAssignedManagedIdentity, + "x509certificate": KafkaAuthMethodXFiveZeroNineCertificate, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KafkaAuthMethod(input) + return &out, nil +} + +type ManagedIdentityMethod string + +const ( + ManagedIdentityMethodSystemAssignedManagedIdentity ManagedIdentityMethod = "SystemAssignedManagedIdentity" + 
ManagedIdentityMethodUserAssignedManagedIdentity ManagedIdentityMethod = "UserAssignedManagedIdentity" +) + +func PossibleValuesForManagedIdentityMethod() []string { + return []string{ + string(ManagedIdentityMethodSystemAssignedManagedIdentity), + string(ManagedIdentityMethodUserAssignedManagedIdentity), + } +} + +func (s *ManagedIdentityMethod) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseManagedIdentityMethod(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseManagedIdentityMethod(input string) (*ManagedIdentityMethod, error) { + vals := map[string]ManagedIdentityMethod{ + "systemassignedmanagedidentity": ManagedIdentityMethodSystemAssignedManagedIdentity, + "userassignedmanagedidentity": ManagedIdentityMethodUserAssignedManagedIdentity, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedIdentityMethod(input) + return &out, nil +} + +type MqttAuthMethod string + +const ( + MqttAuthMethodAnonymous MqttAuthMethod = "Anonymous" + MqttAuthMethodServiceAccountToken MqttAuthMethod = "ServiceAccountToken" + MqttAuthMethodSystemAssignedManagedIdentity MqttAuthMethod = "SystemAssignedManagedIdentity" + MqttAuthMethodUserAssignedManagedIdentity MqttAuthMethod = "UserAssignedManagedIdentity" + MqttAuthMethodXFiveZeroNineCertificate MqttAuthMethod = "X509Certificate" +) + +func PossibleValuesForMqttAuthMethod() []string { + return []string{ + string(MqttAuthMethodAnonymous), + string(MqttAuthMethodServiceAccountToken), + string(MqttAuthMethodSystemAssignedManagedIdentity), + string(MqttAuthMethodUserAssignedManagedIdentity), + string(MqttAuthMethodXFiveZeroNineCertificate), + } +} + +func (s *MqttAuthMethod) UnmarshalJSON(bytes []byte) error { + var decoded 
string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMqttAuthMethod(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMqttAuthMethod(input string) (*MqttAuthMethod, error) { + vals := map[string]MqttAuthMethod{ + "anonymous": MqttAuthMethodAnonymous, + "serviceaccounttoken": MqttAuthMethodServiceAccountToken, + "systemassignedmanagedidentity": MqttAuthMethodSystemAssignedManagedIdentity, + "userassignedmanagedidentity": MqttAuthMethodUserAssignedManagedIdentity, + "x509certificate": MqttAuthMethodXFiveZeroNineCertificate, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MqttAuthMethod(input) + return &out, nil +} + +type MqttRetainType string + +const ( + MqttRetainTypeKeep MqttRetainType = "Keep" + MqttRetainTypeNever MqttRetainType = "Never" +) + +func PossibleValuesForMqttRetainType() []string { + return []string{ + string(MqttRetainTypeKeep), + string(MqttRetainTypeNever), + } +} + +func (s *MqttRetainType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMqttRetainType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMqttRetainType(input string) (*MqttRetainType, error) { + vals := map[string]MqttRetainType{ + "keep": MqttRetainTypeKeep, + "never": MqttRetainTypeNever, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MqttRetainType(input) + return &out, nil +} + +type OperationalMode string + +const ( + OperationalModeDisabled OperationalMode = "Disabled" + OperationalModeEnabled OperationalMode = 
"Enabled" +) + +func PossibleValuesForOperationalMode() []string { + return []string{ + string(OperationalModeDisabled), + string(OperationalModeEnabled), + } +} + +func (s *OperationalMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOperationalMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOperationalMode(input string) (*OperationalMode, error) { + vals := map[string]OperationalMode{ + "disabled": OperationalModeDisabled, + "enabled": OperationalModeEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OperationalMode(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", 
decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/id_dataflowendpoint.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/id_dataflowendpoint.go new file mode 100644 index 000000000000..4b91c9f7dd81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/id_dataflowendpoint.go @@ -0,0 +1,139 @@ +package dataflowendpoint + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&DataflowEndpointId{}) +} + +var _ resourceids.ResourceId = &DataflowEndpointId{} + +// DataflowEndpointId is a struct representing the Resource ID for a Dataflow Endpoint +type DataflowEndpointId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + DataflowEndpointName string +} + +// NewDataflowEndpointID returns a new DataflowEndpointId struct +func NewDataflowEndpointID(subscriptionId string, resourceGroupName string, instanceName string, dataflowEndpointName string) DataflowEndpointId { + return DataflowEndpointId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + DataflowEndpointName: dataflowEndpointName, + } +} + +// ParseDataflowEndpointID parses 'input' into a DataflowEndpointId +func ParseDataflowEndpointID(input string) (*DataflowEndpointId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowEndpointId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowEndpointId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseDataflowEndpointIDInsensitively parses 'input' case-insensitively into a DataflowEndpointId +// note: this method should only be used for API response data and not user input +func ParseDataflowEndpointIDInsensitively(input string) (*DataflowEndpointId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowEndpointId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowEndpointId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *DataflowEndpointId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok 
{ + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.DataflowEndpointName, ok = input.Parsed["dataflowEndpointName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "dataflowEndpointName", input) + } + + return nil +} + +// ValidateDataflowEndpointID checks that 'input' can be parsed as a Dataflow Endpoint ID +func ValidateDataflowEndpointID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseDataflowEndpointID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Dataflow Endpoint ID +func (id DataflowEndpointId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/dataflowEndpoints/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.DataflowEndpointName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Dataflow Endpoint ID +func (id DataflowEndpointId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + 
resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticDataflowEndpoints", "dataflowEndpoints", "dataflowEndpoints"), + resourceids.UserSpecifiedSegment("dataflowEndpointName", "dataflowEndpointName"), + } +} + +// String returns a human-readable description of this Dataflow Endpoint ID +func (id DataflowEndpointId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Dataflow Endpoint Name: %q", id.DataflowEndpointName), + } + return fmt.Sprintf("Dataflow Endpoint (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/id_instance.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/id_instance.go new file mode 100644 index 000000000000..1ea145830e09 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/id_instance.go @@ -0,0 +1,130 @@ +package dataflowendpoint + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&InstanceId{}) +} + +var _ resourceids.ResourceId = &InstanceId{} + +// InstanceId is a struct representing the Resource ID for a Instance +type InstanceId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string +} + +// NewInstanceID returns a new InstanceId struct +func NewInstanceID(subscriptionId string, resourceGroupName string, instanceName string) InstanceId { + return InstanceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + } +} + +// ParseInstanceID parses 'input' into a InstanceId +func ParseInstanceID(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseInstanceIDInsensitively parses 'input' case-insensitively into a InstanceId +// note: this method should only be used for API response data and not user input +func ParseInstanceIDInsensitively(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *InstanceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = 
input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + return nil +} + +// ValidateInstanceID checks that 'input' can be parsed as a Instance ID +func ValidateInstanceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseInstanceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Instance ID +func (id InstanceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Instance ID +func (id InstanceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + } +} + +// String returns a human-readable description of this Instance ID +func (id InstanceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + } + return 
fmt.Sprintf("Instance (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_createorupdate.go new file mode 100644 index 000000000000..f3466649b885 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_createorupdate.go @@ -0,0 +1,75 @@ +package dataflowendpoint + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataflowEndpointResource +} + +// CreateOrUpdate ... 
+func (c DataflowEndpointClient) CreateOrUpdate(ctx context.Context, id DataflowEndpointId, input DataflowEndpointResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c DataflowEndpointClient) CreateOrUpdateThenPoll(ctx context.Context, id DataflowEndpointId, input DataflowEndpointResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_delete.go new file mode 100644 index 000000000000..f3a98ca2a00f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_delete.go @@ -0,0 +1,70 @@ +package dataflowendpoint + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c DataflowEndpointClient) Delete(ctx context.Context, id DataflowEndpointId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c DataflowEndpointClient) DeleteThenPoll(ctx context.Context, id DataflowEndpointId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_get.go new file mode 100644 index 000000000000..54f506d23525 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_get.go @@ -0,0 +1,53 
@@ +package dataflowendpoint + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataflowEndpointResource +} + +// Get ... +func (c DataflowEndpointClient) Get(ctx context.Context, id DataflowEndpointId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataflowEndpointResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_listbyresourcegroup.go new file mode 100644 index 000000000000..27c6f82a4dac --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/method_listbyresourcegroup.go @@ -0,0 +1,105 @@ +package dataflowendpoint + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataflowEndpointResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataflowEndpointResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c DataflowEndpointClient) ListByResourceGroup(ctx context.Context, id InstanceId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/dataflowEndpoints", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataflowEndpointResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c DataflowEndpointClient) ListByResourceGroupComplete(ctx context.Context, id InstanceId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, DataflowEndpointResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c DataflowEndpointClient) ListByResourceGroupCompleteMatchingPredicate(ctx 
context.Context, id InstanceId, predicate DataflowEndpointResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]DataflowEndpointResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_batchingconfiguration.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_batchingconfiguration.go new file mode 100644 index 000000000000..a23e3e225743 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_batchingconfiguration.go @@ -0,0 +1,9 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BatchingConfiguration struct { + LatencySeconds *int64 `json:"latencySeconds,omitempty"` + MaxMessages *int64 `json:"maxMessages,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationaccesstoken.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationaccesstoken.go new file mode 100644 index 000000000000..42d981948b59 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationaccesstoken.go @@ -0,0 +1,8 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointAuthenticationAccessToken struct { + SecretRef string `json:"secretRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationsasl.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationsasl.go new file mode 100644 index 000000000000..7043ec4e2ccc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationsasl.go @@ -0,0 +1,9 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointAuthenticationSasl struct { + SaslType DataflowEndpointAuthenticationSaslType `json:"saslType"` + SecretRef string `json:"secretRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationserviceaccounttoken.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationserviceaccounttoken.go new file mode 100644 index 000000000000..e9430fbc78ff --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationserviceaccounttoken.go @@ -0,0 +1,8 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointAuthenticationServiceAccountToken struct { + Audience string `json:"audience"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationsystemassignedmanagedidentity.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationsystemassignedmanagedidentity.go new file mode 100644 index 000000000000..156ed1f686b6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationsystemassignedmanagedidentity.go @@ -0,0 +1,8 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointAuthenticationSystemAssignedManagedIdentity struct { + Audience *string `json:"audience,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationuserassignedmanagedidentity.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationuserassignedmanagedidentity.go new file mode 100644 index 000000000000..b3e847c806a2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationuserassignedmanagedidentity.go @@ -0,0 +1,10 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointAuthenticationUserAssignedManagedIdentity struct { + ClientId string `json:"clientId"` + Scope *string `json:"scope,omitempty"` + TenantId string `json:"tenantId"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationx509.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationx509.go new file mode 100644 index 000000000000..50a33cd7d877 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointauthenticationx509.go @@ -0,0 +1,8 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointAuthenticationX509 struct { + SecretRef string `json:"secretRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdataexplorer.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdataexplorer.go new file mode 100644 index 000000000000..213600e35472 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdataexplorer.go @@ -0,0 +1,11 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointDataExplorer struct { + Authentication DataflowEndpointDataExplorerAuthentication `json:"authentication"` + Batching *BatchingConfiguration `json:"batching,omitempty"` + Database string `json:"database"` + Host string `json:"host"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdataexplorerauthentication.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdataexplorerauthentication.go new file mode 100644 index 000000000000..a58290bded7e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdataexplorerauthentication.go @@ -0,0 +1,10 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointDataExplorerAuthentication struct { + Method ManagedIdentityMethod `json:"method"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `json:"systemAssignedManagedIdentitySettings,omitempty"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `json:"userAssignedManagedIdentitySettings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdatalakestorage.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdatalakestorage.go new file mode 100644 index 000000000000..c96ffd693a3a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdatalakestorage.go @@ -0,0 +1,10 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointDataLakeStorage struct { + Authentication DataflowEndpointDataLakeStorageAuthentication `json:"authentication"` + Batching *BatchingConfiguration `json:"batching,omitempty"` + Host string `json:"host"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdatalakestorageauthentication.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdatalakestorageauthentication.go new file mode 100644 index 000000000000..f0c3b95a8b7a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointdatalakestorageauthentication.go @@ -0,0 +1,11 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointDataLakeStorageAuthentication struct { + AccessTokenSettings *DataflowEndpointAuthenticationAccessToken `json:"accessTokenSettings,omitempty"` + Method DataLakeStorageAuthMethod `json:"method"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `json:"systemAssignedManagedIdentitySettings,omitempty"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `json:"userAssignedManagedIdentitySettings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelake.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelake.go new file mode 100644 index 000000000000..5549c72abc55 --- /dev/null +++ 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelake.go @@ -0,0 +1,12 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointFabricOneLake struct { + Authentication DataflowEndpointFabricOneLakeAuthentication `json:"authentication"` + Batching *BatchingConfiguration `json:"batching,omitempty"` + Host string `json:"host"` + Names DataflowEndpointFabricOneLakeNames `json:"names"` + OneLakePathType DataflowEndpointFabricPathType `json:"oneLakePathType"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelakeauthentication.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelakeauthentication.go new file mode 100644 index 000000000000..c12398f2f514 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelakeauthentication.go @@ -0,0 +1,10 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointFabricOneLakeAuthentication struct { + Method ManagedIdentityMethod `json:"method"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `json:"systemAssignedManagedIdentitySettings,omitempty"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `json:"userAssignedManagedIdentitySettings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelakenames.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelakenames.go new file mode 100644 index 000000000000..a12e8704b0d5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointfabriconelakenames.go @@ -0,0 +1,9 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointFabricOneLakeNames struct { + LakehouseName string `json:"lakehouseName"` + WorkspaceName string `json:"workspaceName"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafka.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafka.go new file mode 100644 index 000000000000..1dfb1503dfc9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafka.go @@ -0,0 +1,17 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointKafka struct { + Authentication DataflowEndpointKafkaAuthentication `json:"authentication"` + Batching *DataflowEndpointKafkaBatching `json:"batching,omitempty"` + CloudEventAttributes *CloudEventAttributeType `json:"cloudEventAttributes,omitempty"` + Compression *DataflowEndpointKafkaCompression `json:"compression,omitempty"` + ConsumerGroupId *string `json:"consumerGroupId,omitempty"` + CopyMqttProperties *OperationalMode `json:"copyMqttProperties,omitempty"` + Host string `json:"host"` + KafkaAcks *DataflowEndpointKafkaAcks `json:"kafkaAcks,omitempty"` + PartitionStrategy *DataflowEndpointKafkaPartitionStrategy `json:"partitionStrategy,omitempty"` + Tls *TlsProperties `json:"tls,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafkaauthentication.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafkaauthentication.go new file mode 100644 index 000000000000..e7543a15be7e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafkaauthentication.go @@ -0,0 +1,12 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointKafkaAuthentication struct { + Method KafkaAuthMethod `json:"method"` + SaslSettings *DataflowEndpointAuthenticationSasl `json:"saslSettings,omitempty"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `json:"systemAssignedManagedIdentitySettings,omitempty"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `json:"userAssignedManagedIdentitySettings,omitempty"` + X509CertificateSettings *DataflowEndpointAuthenticationX509 `json:"x509CertificateSettings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafkabatching.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafkabatching.go new file mode 100644 index 000000000000..25b13eb03eb2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointkafkabatching.go @@ -0,0 +1,11 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointKafkaBatching struct { + LatencyMs *int64 `json:"latencyMs,omitempty"` + MaxBytes *int64 `json:"maxBytes,omitempty"` + MaxMessages *int64 `json:"maxMessages,omitempty"` + Mode *OperationalMode `json:"mode,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointlocalstorage.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointlocalstorage.go new file mode 100644 index 000000000000..a4e3db1fbc23 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointlocalstorage.go @@ -0,0 +1,8 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointLocalStorage struct { + PersistentVolumeClaimRef string `json:"persistentVolumeClaimRef"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointmqtt.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointmqtt.go new file mode 100644 index 000000000000..3c61f3e2356d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointmqtt.go @@ -0,0 +1,18 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointMqtt struct { + Authentication DataflowEndpointMqttAuthentication `json:"authentication"` + ClientIdPrefix *string `json:"clientIdPrefix,omitempty"` + CloudEventAttributes *CloudEventAttributeType `json:"cloudEventAttributes,omitempty"` + Host *string `json:"host,omitempty"` + KeepAliveSeconds *int64 `json:"keepAliveSeconds,omitempty"` + MaxInflightMessages *int64 `json:"maxInflightMessages,omitempty"` + Protocol *BrokerProtocolType `json:"protocol,omitempty"` + Qos *int64 `json:"qos,omitempty"` + Retain *MqttRetainType `json:"retain,omitempty"` + SessionExpirySeconds *int64 `json:"sessionExpirySeconds,omitempty"` + Tls *TlsProperties `json:"tls,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointmqttauthentication.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointmqttauthentication.go new file mode 100644 index 000000000000..6ee01fb8408a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointmqttauthentication.go @@ -0,0 +1,12 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointMqttAuthentication struct { + Method MqttAuthMethod `json:"method"` + ServiceAccountTokenSettings *DataflowEndpointAuthenticationServiceAccountToken `json:"serviceAccountTokenSettings,omitempty"` + SystemAssignedManagedIdentitySettings *DataflowEndpointAuthenticationSystemAssignedManagedIdentity `json:"systemAssignedManagedIdentitySettings,omitempty"` + UserAssignedManagedIdentitySettings *DataflowEndpointAuthenticationUserAssignedManagedIdentity `json:"userAssignedManagedIdentitySettings,omitempty"` + X509CertificateSettings *DataflowEndpointAuthenticationX509 `json:"x509CertificateSettings,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointproperties.go new file mode 100644 index 000000000000..943da8ad81ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointproperties.go @@ -0,0 +1,15 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointProperties struct { + DataExplorerSettings *DataflowEndpointDataExplorer `json:"dataExplorerSettings,omitempty"` + DataLakeStorageSettings *DataflowEndpointDataLakeStorage `json:"dataLakeStorageSettings,omitempty"` + EndpointType EndpointType `json:"endpointType"` + FabricOneLakeSettings *DataflowEndpointFabricOneLake `json:"fabricOneLakeSettings,omitempty"` + KafkaSettings *DataflowEndpointKafka `json:"kafkaSettings,omitempty"` + LocalStorageSettings *DataflowEndpointLocalStorage `json:"localStorageSettings,omitempty"` + MqttSettings *DataflowEndpointMqtt `json:"mqttSettings,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointresource.go new file mode 100644 index 000000000000..072d6504d990 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_dataflowendpointresource.go @@ -0,0 +1,17 @@ +package dataflowendpoint + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowEndpointResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataflowEndpointProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_extendedlocation.go new file mode 100644 index 000000000000..336d74fc87df --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_extendedlocation.go @@ -0,0 +1,9 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_tlsproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_tlsproperties.go new file mode 100644 index 000000000000..6f74984619d5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/model_tlsproperties.go @@ -0,0 +1,9 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TlsProperties struct { + Mode *OperationalMode `json:"mode,omitempty"` + TrustedCaCertificateConfigMapRef *string `json:"trustedCaCertificateConfigMapRef,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/predicates.go new file mode 100644 index 000000000000..791cf37bf621 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/predicates.go @@ -0,0 +1,27 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowEndpointResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p DataflowEndpointResourceOperationPredicate) Matches(input DataflowEndpointResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/version.go new file mode 100644 index 000000000000..212fd9c53f56 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint/version.go @@ -0,0 +1,10 @@ +package dataflowendpoint + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/dataflowendpoint/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/README.md new file mode 100644 index 000000000000..dc91a9b49508 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/README.md @@ -0,0 +1,82 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile` Documentation + +The `dataflowprofile` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile" +``` + + +### Client Initialization + +```go +client := dataflowprofile.NewDataflowProfileClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `DataflowProfileClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := dataflowprofile.NewDataflowProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName") + +payload := dataflowprofile.DataflowProfileResource{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DataflowProfileClient.Delete` + +```go +ctx := context.TODO() +id := dataflowprofile.NewDataflowProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `DataflowProfileClient.Get` + +```go +ctx := context.TODO() +id := dataflowprofile.NewDataflowProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName", "dataflowProfileName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DataflowProfileClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := dataflowprofile.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/client.go new file mode 100644 index 000000000000..363528c0a4e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/client.go @@ -0,0 +1,26 @@ +package dataflowprofile + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowProfileClient struct { + Client *resourcemanager.Client +} + +func NewDataflowProfileClientWithBaseURI(sdkApi sdkEnv.Api) (*DataflowProfileClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "dataflowprofile", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating DataflowProfileClient: %+v", err) + } + + return &DataflowProfileClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/constants.go new file mode 100644 index 000000000000..00920bf012d0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/constants.go @@ -0,0 +1,104 @@ +package dataflowprofile + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + 
out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/id_dataflowprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/id_dataflowprofile.go new file mode 100644 index 000000000000..df9e72335ff5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/id_dataflowprofile.go @@ -0,0 +1,139 @@ +package dataflowprofile + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&DataflowProfileId{}) +} + +var _ resourceids.ResourceId = &DataflowProfileId{} + +// DataflowProfileId is a struct representing the Resource ID for a Dataflow Profile +type DataflowProfileId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string + DataflowProfileName string +} + +// NewDataflowProfileID returns a new DataflowProfileId struct +func NewDataflowProfileID(subscriptionId string, resourceGroupName string, instanceName string, dataflowProfileName string) DataflowProfileId { + return DataflowProfileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + DataflowProfileName: dataflowProfileName, + } +} + +// ParseDataflowProfileID parses 'input' into a DataflowProfileId +func ParseDataflowProfileID(input string) (*DataflowProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowProfileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseDataflowProfileIDInsensitively parses 'input' case-insensitively into a DataflowProfileId +// note: this method should only be used for API response data and not user input +func ParseDataflowProfileIDInsensitively(input string) (*DataflowProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&DataflowProfileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DataflowProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *DataflowProfileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + if id.DataflowProfileName, ok = input.Parsed["dataflowProfileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "dataflowProfileName", input) + } + + return nil +} + +// ValidateDataflowProfileID checks that 'input' can be parsed as a Dataflow Profile ID +func ValidateDataflowProfileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseDataflowProfileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Dataflow Profile ID +func (id DataflowProfileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s/dataflowProfiles/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName, id.DataflowProfileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Dataflow Profile ID +func (id DataflowProfileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + 
resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + resourceids.StaticSegment("staticDataflowProfiles", "dataflowProfiles", "dataflowProfiles"), + resourceids.UserSpecifiedSegment("dataflowProfileName", "dataflowProfileName"), + } +} + +// String returns a human-readable description of this Dataflow Profile ID +func (id DataflowProfileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + fmt.Sprintf("Dataflow Profile Name: %q", id.DataflowProfileName), + } + return fmt.Sprintf("Dataflow Profile (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/id_instance.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/id_instance.go new file mode 100644 index 000000000000..87c90d357fd3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/id_instance.go @@ -0,0 +1,130 @@ +package dataflowprofile + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&InstanceId{}) +} + +var _ resourceids.ResourceId = &InstanceId{} + +// InstanceId is a struct representing the Resource ID for a Instance +type InstanceId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string +} + +// NewInstanceID returns a new InstanceId struct +func NewInstanceID(subscriptionId string, resourceGroupName string, instanceName string) InstanceId { + return InstanceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + } +} + +// ParseInstanceID parses 'input' into a InstanceId +func ParseInstanceID(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseInstanceIDInsensitively parses 'input' case-insensitively into a InstanceId +// note: this method should only be used for API response data and not user input +func ParseInstanceIDInsensitively(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *InstanceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = 
input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + return nil +} + +// ValidateInstanceID checks that 'input' can be parsed as a Instance ID +func ValidateInstanceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseInstanceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Instance ID +func (id InstanceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Instance ID +func (id InstanceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + } +} + +// String returns a human-readable description of this Instance ID +func (id InstanceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + } + return 
fmt.Sprintf("Instance (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_createorupdate.go new file mode 100644 index 000000000000..d60029064611 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_createorupdate.go @@ -0,0 +1,75 @@ +package dataflowprofile + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataflowProfileResource +} + +// CreateOrUpdate ... 
+func (c DataflowProfileClient) CreateOrUpdate(ctx context.Context, id DataflowProfileId, input DataflowProfileResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c DataflowProfileClient) CreateOrUpdateThenPoll(ctx context.Context, id DataflowProfileId, input DataflowProfileResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_delete.go new file mode 100644 index 000000000000..650f603556d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_delete.go @@ -0,0 +1,70 @@ +package dataflowprofile + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c DataflowProfileClient) Delete(ctx context.Context, id DataflowProfileId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c DataflowProfileClient) DeleteThenPoll(ctx context.Context, id DataflowProfileId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_get.go new file mode 100644 index 000000000000..cccb7a221523 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_get.go @@ -0,0 +1,53 @@ 
+package dataflowprofile + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataflowProfileResource +} + +// Get ... +func (c DataflowProfileClient) Get(ctx context.Context, id DataflowProfileId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataflowProfileResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_listbyresourcegroup.go new file mode 100644 index 000000000000..f2175fbeaeee --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/method_listbyresourcegroup.go @@ -0,0 +1,105 @@ +package dataflowprofile + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataflowProfileResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataflowProfileResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c DataflowProfileClient) ListByResourceGroup(ctx context.Context, id InstanceId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/dataflowProfiles", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataflowProfileResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c DataflowProfileClient) ListByResourceGroupComplete(ctx context.Context, id InstanceId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, DataflowProfileResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c DataflowProfileClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id InstanceId, predicate 
DataflowProfileResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]DataflowProfileResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_dataflowprofileproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_dataflowprofileproperties.go new file mode 100644 index 000000000000..9658f7a66cf8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_dataflowprofileproperties.go @@ -0,0 +1,10 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowProfileProperties struct { + Diagnostics *ProfileDiagnostics `json:"diagnostics,omitempty"` + InstanceCount *int64 `json:"instanceCount,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_dataflowprofileresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_dataflowprofileresource.go new file mode 100644 index 000000000000..2cb10d95c622 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_dataflowprofileresource.go @@ -0,0 +1,17 @@ +package dataflowprofile + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataflowProfileResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataflowProfileProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_diagnosticslogs.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_diagnosticslogs.go new file mode 100644 index 000000000000..8f07991036c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_diagnosticslogs.go @@ -0,0 +1,8 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DiagnosticsLogs struct { + Level *string `json:"level,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_extendedlocation.go new file mode 100644 index 000000000000..9152ed289216 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_extendedlocation.go @@ -0,0 +1,9 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_metrics.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_metrics.go new file mode 100644 index 000000000000..b48a143af6ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_metrics.go @@ -0,0 +1,8 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Metrics struct { + PrometheusPort *int64 `json:"prometheusPort,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_profilediagnostics.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_profilediagnostics.go new file mode 100644 index 000000000000..822949522df1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/model_profilediagnostics.go @@ -0,0 +1,9 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProfileDiagnostics struct { + Logs *DiagnosticsLogs `json:"logs,omitempty"` + Metrics *Metrics `json:"metrics,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/predicates.go new file mode 100644 index 000000000000..2b92da58ad2b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/predicates.go @@ -0,0 +1,27 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataflowProfileResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p DataflowProfileResourceOperationPredicate) Matches(input DataflowProfileResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/version.go new file mode 100644 index 000000000000..43dadecbb1d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile/version.go @@ -0,0 +1,10 @@ +package dataflowprofile + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/dataflowprofile/2024-11-01" +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/README.md new file mode 100644 index 000000000000..2b23d3f025e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/README.md @@ -0,0 +1,121 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance` Documentation + +The `instance` SDK allows for interaction with Azure Resource Manager `iotoperations` (API Version `2024-11-01`). 
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance" +``` + + +### Client Initialization + +```go +client := instance.NewInstanceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `InstanceClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := instance.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +payload := instance.InstanceResource{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `InstanceClient.Delete` + +```go +ctx := context.TODO() +id := instance.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `InstanceClient.Get` + +```go +ctx := context.TODO() +id := instance.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `InstanceClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := commonids.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + 
+### Example Usage: `InstanceClient.ListBySubscription` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ListBySubscription(ctx, id)` can be used to do batched pagination +items, err := client.ListBySubscriptionComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `InstanceClient.Update` + +```go +ctx := context.TODO() +id := instance.NewInstanceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "instanceName") + +payload := instance.InstancePatchModel{ + // ... +} + + +read, err := client.Update(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/client.go new file mode 100644 index 000000000000..4c3d55961547 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/client.go @@ -0,0 +1,26 @@ +package instance + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type InstanceClient struct { + Client *resourcemanager.Client +} + +func NewInstanceClientWithBaseURI(sdkApi sdkEnv.Api) (*InstanceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "instance", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating InstanceClient: %+v", err) + } + + return &InstanceClient{ + Client: client, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/constants.go new file mode 100644 index 000000000000..5ae5391a8634 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/constants.go @@ -0,0 +1,104 @@ +package instance + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExtendedLocationType string + +const ( + ExtendedLocationTypeCustomLocation ExtendedLocationType = "CustomLocation" +) + +func PossibleValuesForExtendedLocationType() []string { + return []string{ + string(ExtendedLocationTypeCustomLocation), + } +} + +func (s *ExtendedLocationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExtendedLocationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExtendedLocationType(input string) (*ExtendedLocationType, error) { + vals := map[string]ExtendedLocationType{ + "customlocation": ExtendedLocationTypeCustomLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ExtendedLocationType(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + 
out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/id_instance.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/id_instance.go new file mode 100644 index 000000000000..d98c7b20a3e3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/id_instance.go @@ -0,0 +1,130 @@ +package instance + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&InstanceId{}) +} + +var _ resourceids.ResourceId = &InstanceId{} + +// InstanceId is a struct representing the Resource ID for a Instance +type InstanceId struct { + SubscriptionId string + ResourceGroupName string + InstanceName string +} + +// NewInstanceID returns a new InstanceId struct +func NewInstanceID(subscriptionId string, resourceGroupName string, instanceName string) InstanceId { + return InstanceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + InstanceName: instanceName, + } +} + +// ParseInstanceID parses 'input' into a InstanceId +func ParseInstanceID(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseInstanceIDInsensitively parses 'input' case-insensitively into a InstanceId +// note: this method should only be used for API response data and not user input +func ParseInstanceIDInsensitively(input string) (*InstanceId, error) { + parser := resourceids.NewParserFromResourceIdType(&InstanceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := InstanceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *InstanceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.InstanceName, ok = 
input.Parsed["instanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "instanceName", input) + } + + return nil +} + +// ValidateInstanceID checks that 'input' can be parsed as a Instance ID +func ValidateInstanceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseInstanceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Instance ID +func (id InstanceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.IoTOperations/instances/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.InstanceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Instance ID +func (id InstanceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftIoTOperations", "Microsoft.IoTOperations", "Microsoft.IoTOperations"), + resourceids.StaticSegment("staticInstances", "instances", "instances"), + resourceids.UserSpecifiedSegment("instanceName", "instanceName"), + } +} + +// String returns a human-readable description of this Instance ID +func (id InstanceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Instance Name: %q", id.InstanceName), + } + return 
fmt.Sprintf("Instance (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_createorupdate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_createorupdate.go new file mode 100644 index 000000000000..6ecb502d1237 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_createorupdate.go @@ -0,0 +1,75 @@ +package instance + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *InstanceResource +} + +// CreateOrUpdate ... 
+func (c InstanceClient) CreateOrUpdate(ctx context.Context, id InstanceId, input InstanceResource) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c InstanceClient) CreateOrUpdateThenPoll(ctx context.Context, id InstanceId, input InstanceResource) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_delete.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_delete.go new file mode 100644 index 000000000000..978dbb2c0ccb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_delete.go @@ -0,0 +1,70 @@ +package instance + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c InstanceClient) Delete(ctx context.Context, id InstanceId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c InstanceClient) DeleteThenPoll(ctx context.Context, id InstanceId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_get.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_get.go new file mode 100644 index 000000000000..c14419b99cc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_get.go @@ -0,0 +1,53 @@ +package instance + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *InstanceResource +} + +// Get ... +func (c InstanceClient) Get(ctx context.Context, id InstanceId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model InstanceResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_listbyresourcegroup.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_listbyresourcegroup.go new file mode 100644 index 000000000000..6148a54ff571 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_listbyresourcegroup.go @@ -0,0 +1,106 @@ +package instance + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]InstanceResource +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []InstanceResource +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c InstanceClient) ListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.IoTOperations/instances", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]InstanceResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c InstanceClient) ListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, InstanceResourceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c InstanceClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate 
InstanceResourceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]InstanceResource, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_listbysubscription.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_listbysubscription.go new file mode 100644 index 000000000000..e5d9429d7398 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_listbysubscription.go @@ -0,0 +1,106 @@ +package instance + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListBySubscriptionOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]InstanceResource +} + +type ListBySubscriptionCompleteResult struct { + LatestHttpResponse *http.Response + Items []InstanceResource +} + +type ListBySubscriptionCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListBySubscriptionCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListBySubscription ... 
+func (c InstanceClient) ListBySubscription(ctx context.Context, id commonids.SubscriptionId) (result ListBySubscriptionOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListBySubscriptionCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.IoTOperations/instances", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]InstanceResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListBySubscriptionComplete retrieves all the results into a single object +func (c InstanceClient) ListBySubscriptionComplete(ctx context.Context, id commonids.SubscriptionId) (ListBySubscriptionCompleteResult, error) { + return c.ListBySubscriptionCompleteMatchingPredicate(ctx, id, InstanceResourceOperationPredicate{}) +} + +// ListBySubscriptionCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c InstanceClient) ListBySubscriptionCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate InstanceResourceOperationPredicate) (result ListBySubscriptionCompleteResult, err error) { + items := make([]InstanceResource, 0) + + resp, err := c.ListBySubscription(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListBySubscriptionCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + 
return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_update.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_update.go new file mode 100644 index 000000000000..9d6c5a93dd62 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/method_update.go @@ -0,0 +1,57 @@ +package instance + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *InstanceResource +} + +// Update ... +func (c InstanceClient) Update(ctx context.Context, id InstanceId, input InstancePatchModel) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model InstanceResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_extendedlocation.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_extendedlocation.go new file mode 100644 index 000000000000..64b2e054802d --- /dev/null 
+++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_extendedlocation.go @@ -0,0 +1,9 @@ +package instance + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExtendedLocation struct { + Name string `json:"name"` + Type ExtendedLocationType `json:"type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instancepatchmodel.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instancepatchmodel.go new file mode 100644 index 000000000000..ed8d146e0ce3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instancepatchmodel.go @@ -0,0 +1,13 @@ +package instance + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type InstancePatchModel struct { + Identity *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instanceproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instanceproperties.go new file mode 100644 index 000000000000..458aa8d97b97 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instanceproperties.go @@ -0,0 +1,11 @@ +package instance + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type InstanceProperties struct { + Description *string `json:"description,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + SchemaRegistryRef SchemaRegistryRef `json:"schemaRegistryRef"` + Version *string `json:"version,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instanceresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instanceresource.go new file mode 100644 index 000000000000..38267f6e96af --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_instanceresource.go @@ -0,0 +1,21 @@ +package instance + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type InstanceResource struct { + ExtendedLocation ExtendedLocation `json:"extendedLocation"` + Id *string `json:"id,omitempty"` + Identity *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *InstanceProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_schemaregistryref.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_schemaregistryref.go new file mode 100644 index 000000000000..a28098a12a72 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/model_schemaregistryref.go @@ -0,0 +1,8 @@ +package instance + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaRegistryRef struct { + ResourceId string `json:"resourceId"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/predicates.go new file mode 100644 index 000000000000..dd0b945fe16c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/predicates.go @@ -0,0 +1,32 @@ +package instance + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type InstanceResourceOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p InstanceResourceOperationPredicate) Matches(input InstanceResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/version.go new file mode 100644 index 000000000000..03be36971849 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance/version.go @@ -0,0 +1,10 @@ +package instance + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2024-11-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/instance/2024-11-01" +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0097c0b21563..37ea45a69016 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -639,6 +639,14 @@ github.com/hashicorp/go-azure-sdk/resource-manager/insights/2023-03-11/datacolle github.com/hashicorp/go-azure-sdk/resource-manager/insights/2023-03-15-preview/scheduledqueryrules github.com/hashicorp/go-azure-sdk/resource-manager/insights/2023-04-03/azuremonitorworkspaces github.com/hashicorp/go-azure-sdk/resource-manager/iotcentral/2021-11-01-preview/apps +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/broker +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthentication +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerauthorization +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/brokerlistener +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflow +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowendpoint +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/dataflowprofile +github.com/hashicorp/go-azure-sdk/resource-manager/iotoperations/2024-11-01/instance github.com/hashicorp/go-azure-sdk/resource-manager/keyvault/2023-02-01/vaults github.com/hashicorp/go-azure-sdk/resource-manager/keyvault/2023-07-01/managedhsms github.com/hashicorp/go-azure-sdk/resource-manager/keyvault/2023-07-01/vaults diff --git a/website/allowed-subcategories b/website/allowed-subcategories index f0a62168b131..8f758d791ffb 100644 --- a/website/allowed-subcategories +++ b/website/allowed-subcategories @@ -62,6 +62,7 @@ Healthcare Hybrid Compute IoT Central IoT Hub +IoT Operations Key Vault Lighthouse Load Balancer diff --git a/website/docs/r/iotoperations_broker.html.markdown 
b/website/docs/r/iotoperations_broker.html.markdown new file mode 100644 index 000000000000..b0d4c4391384 --- /dev/null +++ b/website/docs/r/iotoperations_broker.html.markdown @@ -0,0 +1,520 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_broker" +description: |- + Manages an Azure IoT Operations Broker. +--- + +# azurerm_iotoperations_broker + +Manages an Azure IoT Operations Broker. + +An IoT Operations Broker is the central message routing component that handles MQTT communication between IoT devices and applications. It provides high-performance message routing with configurable scaling, diagnostics, security, and storage options. + +## Example Usage + +### Basic Broker + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} + +resource "azurerm_iotoperations_broker" "example" { + name = "example-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } +} +``` + +### Broker with Performance Tuning + +```hcl +resource "azurerm_iotoperations_broker" "high_performance" { + name = "high-perf-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } + + properties { + 
memory_profile = "High" + + cardinality { + backend_chain { + partitions = 8 + redundancy_factor = 3 + workers = 4 + } + + frontend { + replicas = 4 + workers = 2 + } + } + + generate_resource_limits { + cpu = "Enabled" + } + } +} +``` + +### Broker with Advanced Configuration + +```hcl +resource "azurerm_iotoperations_broker" "advanced" { + name = "advanced-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } + + properties { + memory_profile = "Medium" + + advanced { + encrypt_internal_traffic = "Enabled" + + clients { + max_session_expiry_seconds = 3600 + max_message_expiry_seconds = 1800 + max_packet_size_bytes = 1048576 + max_receive_maximum = 100 + max_keep_alive_seconds = 300 + + subscriber_queue_limit { + length = 1000 + strategy = "DropOldest" + } + } + + internal_certs { + duration = "8760h" # 1 year + renew_before = "720h" # 30 days + + private_key { + algorithm = "Rsa2048" + rotation_policy = "Always" + } + } + } + + diagnostics { + logs { + level = "info" + } + + metrics { + prometheus_port = 9090 + } + + self_check { + mode = "Enabled" + interval_seconds = 30 + timeout_seconds = 10 + } + + traces { + mode = "Enabled" + cache_size_megabytes = 16 + span_channel_capacity = 1000 + + self_tracing { + mode = "Enabled" + interval_seconds = 30 + } + } + } + } +} +``` + +### Broker with Persistent Storage + +```hcl +resource "azurerm_iotoperations_broker" "with_storage" { + name = "storage-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } + + properties { + memory_profile 
= "Medium" + + disk_backed_message_buffer { + max_size = "1Gi" + + persistent_volume_claim_spec { + access_modes = ["ReadWriteOnce"] + storage_class_name = "fast-ssd" + volume_mode = "Filesystem" + + resources { + requests = { + storage = "10Gi" + } + limits = { + storage = "50Gi" + } + } + + selector { + match_labels = { + tier = "storage" + type = "fast" + } + + match_expressions { + key = "environment" + operator = "In" + values = ["production", "staging"] + } + } + } + } + } +} +``` + +### Broker with Ephemeral Storage + +```hcl +resource "azurerm_iotoperations_broker" "ephemeral_storage" { + name = "ephemeral-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } + + properties { + disk_backed_message_buffer { + max_size = "512Mi" + + ephemeral_volume_claim_spec { + access_modes = ["ReadWriteOnce"] + volume_mode = "Filesystem" + + resources { + requests = { + storage = "1Gi" + } + } + + data_source { + api_group = "snapshot.storage.k8s.io" + kind = "VolumeSnapshot" + name = "broker-snapshot" + } + } + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Broker. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Broker should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Changing this forces a new resource to be created. + +* `extended_location` - (Required) An `extended_location` block as defined below. 
Changing this forces a new resource to be created. + +* `properties` - (Optional) A `properties` block as defined below for configuring broker behavior. + +--- + +An `extended_location` block supports the following: + +* `name` - (Required) The extended location name where the IoT Operations Broker should be deployed. + +* `type` - (Required) The extended location type. + +--- + +A `properties` block supports the following: + +* `memory_profile` - (Optional) The memory profile for the broker. Possible values are `Tiny`, `Low`, `Medium`, and `High`. + +* `advanced` - (Optional) An `advanced` block as defined below for advanced broker configuration. + +* `cardinality` - (Optional) A `cardinality` block as defined below for scaling configuration. + +* `diagnostics` - (Optional) A `diagnostics` block as defined below for monitoring and logging. + +* `disk_backed_message_buffer` - (Optional) A `disk_backed_message_buffer` block as defined below for persistent message storage. + +* `generate_resource_limits` - (Optional) A `generate_resource_limits` block as defined below for automatic resource limit generation. + +--- + +An `advanced` block supports the following: + +* `encrypt_internal_traffic` - (Optional) Whether to encrypt internal traffic between broker components. + +* `clients` - (Optional) A `clients` block as defined below for client connection settings. + +* `internal_certs` - (Optional) An `internal_certs` block as defined below for internal certificate configuration. + +--- + +A `clients` block supports the following: + +* `max_session_expiry_seconds` - (Optional) Maximum session expiry time in seconds. + +* `max_message_expiry_seconds` - (Optional) Maximum message expiry time in seconds. + +* `max_packet_size_bytes` - (Optional) Maximum MQTT packet size in bytes. + +* `max_receive_maximum` - (Optional) Maximum number of QoS 1 and QoS 2 publications that the client is willing to process concurrently. 
+ +* `max_keep_alive_seconds` - (Optional) Maximum keep alive time in seconds. + +* `subscriber_queue_limit` - (Optional) A `subscriber_queue_limit` block as defined below. + +--- + +A `subscriber_queue_limit` block supports the following: + +* `length` - (Optional) Maximum number of messages in the subscriber queue. + +* `strategy` - (Optional) Strategy for handling queue overflow. Possible values are `None` and `DropOldest`. + +--- + +An `internal_certs` block supports the following: + +* `duration` - (Optional) The duration of the certificate validity period (e.g., "8760h" for 1 year). + +* `renew_before` - (Optional) The time before expiry when the certificate should be renewed (e.g., "720h" for 30 days). + +* `private_key` - (Optional) A `private_key` block as defined below. + +--- + +A `private_key` block supports the following: + +* `algorithm` - (Optional) The algorithm for the private key. + +* `rotation_policy` - (Optional) The rotation policy for the private key. + +--- + +A `cardinality` block supports the following: + +* `backend_chain` - (Optional) A `backend_chain` block as defined below for backend scaling configuration. + +* `frontend` - (Optional) A `frontend` block as defined below for frontend scaling configuration. + +--- + +A `backend_chain` block supports the following: + +* `partitions` - (Optional) Number of partitions for the backend chain. Must be between 1-16. + +* `redundancy_factor` - (Optional) Redundancy factor for the backend chain. Must be between 1-5. + +* `workers` - (Optional) Number of worker threads for the backend chain. Must be between 1-16. + +--- + +A `frontend` block supports the following: + +* `replicas` - (Optional) Number of frontend replicas. Must be between 1-16. + +* `workers` - (Optional) Number of worker threads per frontend replica. Must be between 1-16. + +--- + +A `diagnostics` block supports the following: + +* `logs` - (Optional) A `logs` block as defined below for logging configuration. 
+ +* `metrics` - (Optional) A `metrics` block as defined below for metrics collection. + +* `self_check` - (Optional) A `self_check` block as defined below for health checking. + +* `traces` - (Optional) A `traces` block as defined below for distributed tracing. + +--- + +A `logs` block supports the following: + +* `level` - (Optional) The logging level (e.g., "info", "debug", "warn", "error"). + +--- + +A `metrics` block supports the following: + +* `prometheus_port` - (Optional) Port number for Prometheus metrics endpoint. Must be between 0-65535. + +--- + +A `self_check` block supports the following: + +* `mode` - (Optional) Whether self-checking is enabled. + +* `interval_seconds` - (Optional) Interval between self-checks in seconds. + +* `timeout_seconds` - (Optional) Timeout for self-check operations in seconds. + +--- + +A `traces` block supports the following: + +* `mode` - (Optional) Whether distributed tracing is enabled. + +* `cache_size_megabytes` - (Optional) Size of the trace cache in megabytes. + +* `span_channel_capacity` - (Optional) Capacity of the span channel. + +* `self_tracing` - (Optional) A `self_tracing` block as defined below. + +--- + +A `self_tracing` block supports the following: + +* `mode` - (Optional) Whether self-tracing is enabled. + +* `interval_seconds` - (Optional) Interval between self-trace operations in seconds. + +--- + +A `disk_backed_message_buffer` block supports the following: + +* `max_size` - (Optional) Maximum size of the message buffer (e.g., "1Gi", "512Mi"). + +* `ephemeral_volume_claim_spec` - (Optional) A `volume_claim_spec` block as defined below for ephemeral storage. + +* `persistent_volume_claim_spec` - (Optional) A `volume_claim_spec` block as defined below for persistent storage. + +--- + +A `volume_claim_spec` block supports the following: + +* `volume_name` - (Optional) Name of the volume. + +* `volume_mode` - (Optional) Volume mode (e.g., "Filesystem", "Block"). 
+ +* `storage_class_name` - (Optional) Storage class name for the volume. + +* `access_modes` - (Optional) List of access modes for the volume (e.g., ["ReadWriteOnce", "ReadOnlyMany"]). + +* `data_source` - (Optional) A `data_source` block as defined below. + +* `data_source_ref` - (Optional) A `data_source_ref` block as defined below. + +* `resources` - (Optional) A `resources` block as defined below for resource requirements. + +* `selector` - (Optional) A `selector` block as defined below for volume selection. + +--- + +A `data_source` block supports the following: + +* `api_group` - (Optional) API group of the data source. + +* `kind` - (Optional) Kind of the data source. + +* `name` - (Optional) Name of the data source. + +--- + +A `data_source_ref` block supports the following: + +* `api_group` - (Optional) API group of the data source reference. + +* `kind` - (Optional) Kind of the data source reference. + +* `name` - (Optional) Name of the data source reference. + +* `namespace` - (Optional) Namespace of the data source reference. + +--- + +A `resources` block supports the following: + +* `limits` - (Optional) Map of resource limits (e.g., `{"storage" = "10Gi", "cpu" = "500m"}`). + +* `requests` - (Optional) Map of resource requests (e.g., `{"storage" = "5Gi", "memory" = "1Gi"}`). + +--- + +A `selector` block supports the following: + +* `match_labels` - (Optional) Map of labels to match. + +* `match_expressions` - (Optional) List of `match_expression` blocks as defined below. + +--- + +A `match_expression` block supports the following: + +* `key` - (Optional) The label key. + +* `operator` - (Optional) The operator for matching (e.g., "In", "NotIn", "Exists", "DoesNotExist"). + +* `values` - (Optional) List of values to match against. + +--- + +A `generate_resource_limits` block supports the following: + +* `cpu` - (Optional) Whether to automatically generate CPU resource limits. 
+ +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Broker. + +* `provisioning_state` - The provisioning state of the Broker. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Broker. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Broker. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Broker. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Broker. + +## Import + +An IoT Operations Broker can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_iotoperations_broker.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/brokers/broker1 +``` diff --git a/website/docs/r/iotoperations_broker_listener.html.markdown b/website/docs/r/iotoperations_broker_listener.html.markdown new file mode 100644 index 000000000000..afb7bcf17719 --- /dev/null +++ b/website/docs/r/iotoperations_broker_listener.html.markdown @@ -0,0 +1,293 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_broker_listener" +description: |- + Manages an Azure IoT Operations Broker Listener. +--- + +# azurerm_iotoperations_broker_listener + +Manages an Azure IoT Operations Broker Listener. + +A Broker Listener defines how clients can connect to the IoT Operations broker, including network configuration, authentication, and TLS settings. It provides network endpoints for MQTT and WebSocket protocols with configurable security options. 
+ +## Example Usage + +### Basic Broker Listener + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} + +resource "azurerm_iotoperations_broker" "example" { + name = "example-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } +} + +resource "azurerm_iotoperations_broker_listener" "example" { + name = "example-listener" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + extended_location_name = azurerm_iotoperations_instance.example.extended_location_name + service_name = "example-service" + service_type = "ClusterIp" + + ports { + port = 1883 + protocol = "MQTT" + } + + ports { + port = 8883 + protocol = "MQTT" + + tls { + mode = "Manual" + + manual { + secret_ref = "example-tls-secret" + } + } + } +} +``` + +### Broker Listener with Automatic TLS using Cert Manager + +```hcl +resource "azurerm_iotoperations_broker_listener" "example_auto_tls" { + name = "example-listener-auto-tls" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + extended_location_name = azurerm_iotoperations_instance.example.extended_location_name + service_name = "example-service-tls" + service_type = "LoadBalancer" + + ports { + 
port = 8883 + node_port = 30883 + protocol = "MQTT" + + tls { + mode = "Automatic" + + cert_manager_certificate_spec { + duration = "8760h" # 1 year + secret_name = "broker-tls-secret" + renew_before = "720h" # 30 days + + issuer_ref { + group = "cert-manager.io" + kind = "ClusterIssuer" + name = "letsencrypt-prod" + } + + private_key { + algorithm = "Rsa2048" + rotation_policy = "Always" + } + + san { + dns = ["broker.example.com", "*.broker.example.com"] + ip = ["192.168.1.100", "10.0.0.100"] + } + } + } + } + + ports { + port = 9001 + protocol = "WebSockets" + + tls { + mode = "Automatic" + + cert_manager_certificate_spec { + issuer_ref { + group = "cert-manager.io" + kind = "ClusterIssuer" + name = "letsencrypt-prod" + } + + san { + dns = ["websocket.example.com"] + ip = ["192.168.1.101"] + } + } + } + } +} +``` + +### Broker Listener with Authentication and Authorization + +```hcl +resource "azurerm_iotoperations_broker_listener" "example_auth" { + name = "example-listener-auth" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + extended_location_name = azurerm_iotoperations_instance.example.extended_location_name + service_name = "auth-service" + service_type = "NodePort" + + ports { + port = 1883 + node_port = 31883 + protocol = "MQTT" + authentication_ref = "example-auth-method" + authorization_ref = "example-authz-policy" + } + + ports { + port = 8883 + node_port = 31884 + protocol = "MQTT" + authentication_ref = "example-auth-method" + authorization_ref = "example-authz-policy" + + tls { + mode = "Manual" + + manual { + secret_ref = "mqtt-tls-secret" + } + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Broker Listener. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. 
Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Broker Listener should exist. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `broker_name` - (Required) The name of the IoT Operations Broker. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `extended_location_name` - (Required) The extended location name where the IoT Operations Broker Listener should be deployed. Changing this forces a new resource to be created. + +* `ports` - (Required) A list of `ports` blocks as defined below. At least one port must be configured. + +* `service_name` - (Optional) The name of the Kubernetes service. Must be between 1-63 characters. + +* `service_type` - (Optional) The type of Kubernetes service. Possible values are `LoadBalancer`, `NodePort`, and `ClusterIp`. Defaults to `ClusterIp`. + +--- + +A `ports` block supports the following: + +* `port` - (Required) The port number for the broker listener. Must be between 1-65535. + +* `node_port` - (Optional) The node port number when `service_type` is set to `NodePort` or `LoadBalancer`. Must be between 30000-32767. + +* `protocol` - (Optional) The protocol for the port. Possible values are `MQTT` and `WebSockets`. Defaults to `MQTT`. + +* `authentication_ref` - (Optional) Reference to the authentication method to use for this port. Must be between 1-253 characters. + +* `authorization_ref` - (Optional) Reference to the authorization policy to use for this port. Must be between 1-253 characters. 
+ +* `tls` - (Optional) A `tls` block as defined below for configuring TLS settings. + +--- + +A `tls` block supports the following: + +* `mode` - (Required) The TLS mode for the port. Possible values are `Automatic` and `Manual`. + +* `cert_manager_certificate_spec` - (Optional) A `cert_manager_certificate_spec` block as defined below. This is required when `mode` is set to `Automatic`. + +* `manual` - (Optional) A `manual` block as defined below. This is required when `mode` is set to `Manual`. + +--- + +A `cert_manager_certificate_spec` block supports the following: + +* `issuer_ref` - (Required) An `issuer_ref` block as defined below specifying the certificate issuer. + +* `duration` - (Optional) The duration of the certificate validity period. Must be between 1-50 characters (e.g., "8760h" for 1 year). + +* `secret_name` - (Optional) The name of the Kubernetes secret where the certificate will be stored. Must be between 1-253 characters. + +* `renew_before` - (Optional) The time before expiry when the certificate should be renewed. Must be between 1-50 characters (e.g., "720h" for 30 days). + +* `private_key` - (Optional) A `private_key` block as defined below for private key configuration. + +* `san` - (Optional) A `san` block as defined below for Subject Alternative Names. + +--- + +An `issuer_ref` block supports the following: + +* `group` - (Required) The API group of the certificate issuer. Must be between 1-253 characters (typically "cert-manager.io"). + +* `kind` - (Required) The kind of certificate issuer. Possible values are `ClusterIssuer` and `Issuer`. + +* `name` - (Required) The name of the certificate issuer. Must be between 1-253 characters. + +--- + +A `private_key` block supports the following: + +* `algorithm` - (Required) The algorithm for the private key. Possible values are `Rsa2048`, `Rsa4096`, `Rsa8192`, `Ec256`, `Ec384`, `Ec521`, and `Ed25519`. + +* `rotation_policy` - (Required) The rotation policy for the private key. 
Possible values are `Always` and `Never`. + +--- + +A `san` block supports the following: + +* `dns` - (Required) A list of DNS names to include in the certificate's Subject Alternative Names. Each DNS name must be between 1-253 characters. + +* `ip` - (Required) A list of IP addresses to include in the certificate's Subject Alternative Names. Each IP must be a valid IP address. + +--- + +A `manual` block supports the following: + +* `secret_ref` - (Required) Reference to the Kubernetes secret containing the TLS certificate. Must be between 1-253 characters. + +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Broker Listener. + +* `provisioning_state` - The provisioning state of the Broker Listener. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Broker Listener. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Broker Listener. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Broker Listener. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Broker Listener. + +## Import + +An IoT Operations Broker Listener can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_iotoperations_broker_listener.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/brokers/broker1/listeners/listener1 +``` diff --git a/website/docs/r/iotoperations_brokerauthentication.html.markdown b/website/docs/r/iotoperations_brokerauthentication.html.markdown new file mode 100644 index 000000000000..05a6a34f8f9b --- /dev/null +++ b/website/docs/r/iotoperations_brokerauthentication.html.markdown @@ -0,0 +1,395 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_broker_authentication" +description: |- + Manages an Azure IoT Operations Broker Authentication. +--- + +# azurerm_iotoperations_broker_authentication + +Manages an Azure IoT Operations Broker Authentication. + +A Broker Authentication defines how clients authenticate with the IoT Operations broker. It supports multiple authentication methods including X.509 certificates, service account tokens, and custom authentication providers, providing flexible security options for different client types and deployment scenarios. 
+ +## Example Usage + +### X.509 Certificate Authentication + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} + +resource "azurerm_iotoperations_broker" "example" { + name = "example-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } +} + +resource "azurerm_iotoperations_broker_authentication" "x509_auth" { + name = "x509-auth" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authentication_methods { + method = "X509" + + x509_settings { + trusted_client_ca_cert = "ca-certificates" + + authorization_attributes { + name = "device-id" + subject = "CN" + attributes = { + "device-type" = "sensor" + "location" = "factory-floor" + } + } + + authorization_attributes { + name = "organization" + subject = "O" + attributes = { + "department" = "manufacturing" + "access" = "read-write" + } + } + } + } +} +``` + +### Service Account Token Authentication + +```hcl +resource "azurerm_iotoperations_broker_authentication" "sat_auth" { + name = "sat-auth" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = 
azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authentication_methods { + method = "ServiceAccountToken" + + service_account_token_settings { + audiences = [ + "iotoperations.azure.com", + "mqtt.broker.local", + "https://management.azure.com/" + ] + } + } +} +``` + +### Custom Authentication Provider + +```hcl +resource "azurerm_iotoperations_broker_authentication" "custom_auth" { + name = "custom-auth" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authentication_methods { + method = "Custom" + + custom_settings { + endpoint = "https://auth.example.com/validate" + + auth { + x509 { + secret_ref = "auth-server-cert" + } + } + + ca_cert_config_map = "custom-ca-certs" + + headers = { + "Content-Type" = "application/json" + "X-API-Version" = "v1" + "X-Client-ID" = "iot-broker" + } + } + } +} +``` + +### Multiple Authentication Methods + +```hcl +resource "azurerm_iotoperations_broker_authentication" "multi_auth" { + name = "multi-auth" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + # X.509 certificate authentication for devices + authentication_methods { + method = "X509" + + x509_settings { + trusted_client_ca_cert = "device-ca-certs" + + authorization_attributes { + name = "device-type" + subject = "CN" + attributes = { + "category" = "iot-device" + "protocol" = "mqtt" + } + } + } + } + + # Service account token for applications + 
authentication_methods { + method = "ServiceAccountToken" + + service_account_token_settings { + audiences = [ + "iotoperations.azure.com", + "application.local" + ] + } + } + + # Custom authentication for special clients + authentication_methods { + method = "Custom" + + custom_settings { + endpoint = "https://auth.enterprise.com/oauth/validate" + ca_cert_config_map = "enterprise-ca" + + auth { + x509 { + secret_ref = "oauth-client-cert" + } + } + + headers = { + "Authorization" = "Bearer ${var.api_token}" + "Accept" = "application/json" + } + } + } +} +``` + +### Enterprise X.509 Authentication with Multiple Attributes + +```hcl +resource "azurerm_iotoperations_broker_authentication" "enterprise_x509" { + name = "enterprise-x509" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authentication_methods { + method = "X509" + + x509_settings { + trusted_client_ca_cert = "enterprise-root-ca" + + # Device identification + authorization_attributes { + name = "device-identity" + subject = "CN" + attributes = { + "device-id" = "unique-identifier" + "manufacturer" = "company-name" + "model" = "device-model" + "firmware" = "version-info" + } + } + + # Location-based access + authorization_attributes { + name = "location-access" + subject = "L" + attributes = { + "facility" = "building-name" + "floor" = "level-number" + "zone" = "security-zone" + "access-level" = "restricted" + } + } + + # Organizational unit + authorization_attributes { + name = "department" + subject = "OU" + attributes = { + "division" = "business-unit" + "team" = "operational-group" + "role" = "function-type" + "clearance" = "security-level" + } + } + + # Organization details + authorization_attributes { + name = "organization" + subject = "O" + 
attributes = { + "company" = "corporation-name" + "subsidiary" = "regional-office" + "contract" = "agreement-type" + "tier" = "service-level" + } + } + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Broker Authentication. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Broker Authentication should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `broker_name` - (Required) The name of the IoT Operations Broker. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `extended_location` - (Required) An `extended_location` block as defined below. Changing this forces a new resource to be created. + +* `authentication_methods` - (Required) A list of `authentication_methods` blocks as defined below. At least one authentication method must be configured. + +--- + +An `extended_location` block supports the following: + +* `name` - (Required) The extended location name where the IoT Operations Broker Authentication should be deployed. + +* `type` - (Optional) The extended location type. Defaults to `CustomLocation`. + +--- + +An `authentication_methods` block supports the following: + +* `method` - (Required) The authentication method type. Possible values are `Custom`, `ServiceAccountToken`, and `X509`. 
+ +* `custom_settings` - (Optional) A `custom_settings` block as defined below. Required when `method` is set to `Custom`. + +* `service_account_token_settings` - (Optional) A `service_account_token_settings` block as defined below. Required when `method` is set to `ServiceAccountToken`. + +* `x509_settings` - (Optional) An `x509_settings` block as defined below. Required when `method` is set to `X509`. + +--- + +A `custom_settings` block supports the following: + +* `endpoint` - (Required) The URL of the custom authentication endpoint. + +* `auth` - (Optional) An `auth` block as defined below for authenticating with the custom endpoint. + +* `ca_cert_config_map` - (Optional) Name of the ConfigMap containing CA certificates for verifying the custom authentication endpoint. + +* `headers` - (Optional) A map of HTTP headers to include in requests to the custom authentication endpoint. + +--- + +An `auth` block supports the following: + +* `x509` - (Required) An `x509` block as defined below for X.509 certificate authentication. + +--- + +An `x509` block supports the following: + +* `secret_ref` - (Required) Reference to the Kubernetes secret containing the X.509 certificate for authenticating with the custom endpoint. + +--- + +A `service_account_token_settings` block supports the following: + +* `audiences` - (Required) List of acceptable audiences for the service account token. At least one audience must be specified. + +--- + +An `x509_settings` block supports the following: + +* `trusted_client_ca_cert` - (Optional) Name of the ConfigMap containing trusted client CA certificates for X.509 authentication. + +* `authorization_attributes` - (Optional) A list of `authorization_attributes` blocks as defined below for mapping certificate attributes to authorization properties. + +--- + +An `authorization_attributes` block supports the following: + +* `name` - (Required) The name of the authorization attribute mapping. 
+ +* `subject` - (Required) The X.509 certificate subject field to extract (e.g., "CN", "O", "OU", "L", "C"). + +* `attributes` - (Required) A map of attribute key-value pairs to associate with clients matching this certificate subject. + +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Broker Authentication. + +* `provisioning_state` - The provisioning state of the Broker Authentication. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Broker Authentication. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Broker Authentication. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Broker Authentication. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Broker Authentication. + +## Import + +An IoT Operations Broker Authentication can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_iotoperations_broker_authentication.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/brokers/broker1/authentications/authentication1 +``` diff --git a/website/docs/r/iotoperations_brokerauthorization.html.markdown b/website/docs/r/iotoperations_brokerauthorization.html.markdown new file mode 100644 index 000000000000..86941229aacb --- /dev/null +++ b/website/docs/r/iotoperations_brokerauthorization.html.markdown @@ -0,0 +1,699 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_broker_authorization" +description: |- + Manages an Azure IoT Operations Broker Authorization. 
+--- + +# azurerm_iotoperations_broker_authorization + +Manages an Azure IoT Operations Broker Authorization. + +A Broker Authorization defines access control policies for the IoT Operations broker, determining what actions authenticated clients can perform. It supports fine-grained permissions for MQTT operations (connect, publish, subscribe) and state store access, with flexible principal matching based on client IDs, usernames, or custom attributes. + +## Example Usage + +### Basic MQTT Authorization + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} + +resource "azurerm_iotoperations_broker" "example" { + name = "example-broker" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } +} + +resource "azurerm_iotoperations_broker_authorization" "basic_mqtt" { + name = "basic-mqtt-authz" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authorization_policies { + cache = "Enabled" + + rules { + # Allow device connection + broker_resources { + method = "Connect" + } + + # Allow publishing to device topics + broker_resources { + method = "Publish" + topics = [ + "devices/+/telemetry", + "devices/+/status" + ] + } + + # Allow 
subscribing to command topics + broker_resources { + method = "Subscribe" + topics = [ + "devices/+/commands", + "devices/+/config" + ] + } + + principals { + clients = [ + "device-*", + "sensor-*" + ] + } + } + } +} +``` + +### Role-Based Authorization + +```hcl +resource "azurerm_iotoperations_broker_authorization" "role_based" { + name = "role-based-authz" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authorization_policies { + cache = "Enabled" + + # Admin users - full access + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = ["*"] + } + + broker_resources { + method = "Subscribe" + topics = ["*"] + } + + principals { + usernames = [ + "admin", + "system-operator" + ] + } + } + + # Device operators - device management + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = [ + "devices/+/commands", + "devices/+/config", + "management/+" + ] + } + + broker_resources { + method = "Subscribe" + topics = [ + "devices/+/telemetry", + "devices/+/status", + "devices/+/alerts" + ] + } + + principals { + usernames = [ + "device-operator", + "maintenance-tech" + ] + } + } + + # Read-only monitoring users + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Subscribe" + topics = [ + "devices/+/telemetry", + "devices/+/status", + "system/metrics" + ] + } + + principals { + usernames = [ + "monitor-user", + "dashboard-service" + ] + } + } + } +} +``` + +### Attribute-Based Authorization + +```hcl +resource "azurerm_iotoperations_broker_authorization" "attribute_based" { + name = "attribute-based-authz" + resource_group_name = azurerm_resource_group.example.name + 
instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authorization_policies { + cache = "Enabled" + + # Manufacturing floor devices + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = [ + "factory/floor1/+/data", + "factory/floor1/+/status" + ] + } + + broker_resources { + method = "Subscribe" + topics = [ + "factory/floor1/+/commands", + "factory/broadcast" + ] + } + + principals { + attributes = [ + { + "location" = "floor1" + "device-type" = "sensor" + "access" = "production" + } + ] + } + } + + # Quality control systems + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Subscribe" + topics = [ + "factory/+/+/data", + "quality/+/reports" + ] + } + + broker_resources { + method = "Publish" + topics = [ + "quality/+/alerts", + "quality/+/reports" + ] + } + + principals { + attributes = [ + { + "department" = "quality-control" + "clearance" = "level-2" + } + ] + } + } + + # Maintenance team access + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = [ + "maintenance/+/schedule", + "maintenance/+/reports" + ] + } + + broker_resources { + method = "Subscribe" + topics = [ + "factory/+/+/diagnostics", + "maintenance/+/alerts" + ] + } + + principals { + attributes = [ + { + "team" = "maintenance" + "shift" = "day" + "level" = "technician" + }, + { + "team" = "maintenance" + "shift" = "night" + "level" = "supervisor" + } + ] + } + } + } +} +``` + +### State Store Authorization + +```hcl +resource "azurerm_iotoperations_broker_authorization" "state_store" { + name = "state-store-authz" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = 
azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authorization_policies { + cache = "Disabled" # Disable caching for dynamic permissions + + # Application state management + rules { + broker_resources { + method = "Connect" + } + + # State store access for application data + state_store_resources { + key_type = "String" + keys = [ + "app.config.*", + "app.state.*", + "app.cache.*" + ] + method = "ReadWrite" + } + + # Binary data access + state_store_resources { + key_type = "Binary" + keys = [ + "blobs/images/*", + "blobs/documents/*" + ] + method = "ReadWrite" + } + + # Pattern-based access for dynamic keys + state_store_resources { + key_type = "Pattern" + keys = [ + "user\\..*\\.settings", + "session\\..*\\.data" + ] + method = "ReadWrite" + } + + principals { + clients = [ + "application-*", + "service-*" + ] + } + } + + # Read-only analytics access + rules { + broker_resources { + method = "Connect" + } + + # Read-only access to analytics data + state_store_resources { + key_type = "String" + keys = [ + "analytics.*", + "reports.*", + "metrics.*" + ] + method = "Read" + } + + principals { + usernames = [ + "analytics-service", + "reporting-engine" + ] + attributes = [ + { + "service-type" = "analytics" + "access-level" = "read-only" + } + ] + } + } + + # Device configuration management + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Subscribe" + topics = [ + "devices/+/config-request" + ] + } + + broker_resources { + method = "Publish" + topics = [ + "devices/+/config-response" + ] + } + + # Device-specific configuration access + state_store_resources { + key_type = "Pattern" + keys = [ + "device\\..+\\.config", + "device\\..+\\.firmware" + ] + method = "Write" + } + + principals { + clients = [ + "config-manager" + ] + attributes = [ + { + "service" = "device-management" + "role" = "configurator" 
+ } + ] + } + } + } +} +``` + +### Multi-Tenant Authorization + +```hcl +resource "azurerm_iotoperations_broker_authorization" "multi_tenant" { + name = "multi-tenant-authz" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + broker_name = azurerm_iotoperations_broker.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + authorization_policies { + cache = "Enabled" + + # Tenant A - restricted to their namespace + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = [ + "tenants/tenant-a/+", + "tenants/tenant-a/+/+" + ] + } + + broker_resources { + method = "Subscribe" + topics = [ + "tenants/tenant-a/+", + "tenants/tenant-a/+/+", + "shared/announcements" + ] + } + + state_store_resources { + key_type = "Pattern" + keys = [ + "tenant-a\\..*" + ] + method = "ReadWrite" + } + + principals { + attributes = [ + { + "tenant-id" = "tenant-a" + "org" = "company-alpha" + } + ] + clients = [ + "tenant-a-*" + ] + } + } + + # Tenant B - restricted to their namespace + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = [ + "tenants/tenant-b/+", + "tenants/tenant-b/+/+" + ] + } + + broker_resources { + method = "Subscribe" + topics = [ + "tenants/tenant-b/+", + "tenants/tenant-b/+/+", + "shared/announcements" + ] + } + + state_store_resources { + key_type = "Pattern" + keys = [ + "tenant-b\\..*" + ] + method = "ReadWrite" + } + + principals { + attributes = [ + { + "tenant-id" = "tenant-b" + "org" = "company-beta" + } + ] + clients = [ + "tenant-b-*" + ] + } + } + + # Platform administrators - cross-tenant access + rules { + broker_resources { + method = "Connect" + } + + broker_resources { + method = "Publish" + topics = [ + "*" + ] + } + + broker_resources { + method = "Subscribe" + topics = [ + "*" + ] + } 
+ + state_store_resources { + key_type = "String" + keys = [ + "*" + ] + method = "ReadWrite" + } + + principals { + usernames = [ + "platform-admin", + "system-monitor" + ] + attributes = [ + { + "role" = "platform-admin" + "level" = "global" + } + ] + } + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Broker Authorization. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Broker Authorization should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `broker_name` - (Required) The name of the IoT Operations Broker. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `extended_location` - (Required) An `extended_location` block as defined below. Changing this forces a new resource to be created. + +* `authorization_policies` - (Required) An `authorization_policies` block as defined below for configuring access control policies. + +--- + +An `extended_location` block supports the following: + +* `name` - (Required) The extended location name where the IoT Operations Broker Authorization should be deployed. + +* `type` - (Optional) The extended location type. Defaults to `CustomLocation`. + +--- + +An `authorization_policies` block supports the following: + +* `cache` - (Optional) Whether to cache authorization decisions for performance. 
Possible values are `Enabled` and `Disabled`. Defaults to `Enabled`. + +* `rules` - (Required) A list of `rules` blocks as defined below. At least one rule must be configured. + +--- + +A `rules` block supports the following: + +* `broker_resources` - (Required) A list of `broker_resources` blocks as defined below for MQTT operation permissions. At least one broker resource rule must be configured. + +* `principals` - (Required) A `principals` block as defined below for identifying which clients this rule applies to. + +* `state_store_resources` - (Optional) A list of `state_store_resources` blocks as defined below for state store access permissions. + +--- + +A `broker_resources` block supports the following: + +* `method` - (Required) The MQTT operation method. Possible values are `Connect`, `Publish`, and `Subscribe`. + +* `clients` - (Optional) List of client ID patterns that this rule applies to. Supports glob-style matching with the `*` wildcard (e.g. `device-*`). + +* `topics` - (Optional) List of MQTT topic patterns that this rule applies to. Required for `Publish` and `Subscribe` methods. Supports the MQTT topic wildcards `+` (single level) and `#` (multi-level). + +--- + +A `principals` block supports the following: + +* `clients` - (Optional) List of client ID patterns to match against connecting clients. + +* `usernames` - (Optional) List of usernames to match against authenticated clients. + +* `attributes` - (Optional) List of attribute maps to match against client authentication attributes. Each map contains key-value pairs that must all match the client's attributes. + +--- + +A `state_store_resources` block supports the following: + +* `key_type` - (Required) The type of key matching to perform. Possible values are `Binary`, `Pattern`, and `String`. + +* `keys` - (Required) List of keys or key patterns to match. At least one key must be specified. For `Pattern` type, supports regular expressions. 
+ +* `method` - (Required) The access method for the state store resource. Possible values are `Read`, `ReadWrite`, and `Write`. + +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Broker Authorization. + +* `provisioning_state` - The provisioning state of the Broker Authorization. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Broker Authorization. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Broker Authorization. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Broker Authorization. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Broker Authorization. + +## Import + +An IoT Operations Broker Authorization can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_iotoperations_broker_authorization.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/brokers/broker1/authorizations/authorization1 +``` diff --git a/website/docs/r/iotoperations_dataflow.html.markdown b/website/docs/r/iotoperations_dataflow.html.markdown new file mode 100644 index 000000000000..c977bde03396 --- /dev/null +++ b/website/docs/r/iotoperations_dataflow.html.markdown @@ -0,0 +1,724 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_dataflow" +description: |- + Manages an Azure IoT Operations Dataflow. +--- + +# azurerm_iotoperations_dataflow + +Manages an Azure IoT Operations Dataflow. + +A Dataflow defines data processing pipelines that move and transform data between sources and destinations in IoT Operations. 
It supports complex data transformations including filtering, mapping, and dataset creation with multiple serialization formats and built-in transformation capabilities. + +## Example Usage + +### Basic Data Pipeline + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} + +resource "azurerm_iotoperations_dataflow_profile" "example" { + name = "example-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = azurerm_iotoperations_instance.example.extended_location_type + } +} + +resource "azurerm_iotoperations_dataflow" "basic_pipeline" { + name = "basic-pipeline" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.example.name + mode = "Enabled" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + # Source operation - read from MQTT endpoint + operations { + name = "mqtt-source" + operation_type = "Source" + + source_settings { + data_sources = [ + "azure-iot-operations/data/thermostat" + ] + endpoint_ref = "mqtt-endpoint" + serialization_format = "Json" + } + } + + # Destination operation - write to Event Hub + operations { + name = "eventhub-destination" + operation_type = "Destination" + + destination_settings { + data_destination = "processed-telemetry" + endpoint_ref = "eventhub-endpoint" + } + } +} +``` + +### Complex 
Transformation Pipeline + +```hcl +resource "azurerm_iotoperations_dataflow" "transformation_pipeline" { + name = "transformation-pipeline" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.example.name + mode = "Enabled" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + # Source operation + operations { + name = "sensor-data-source" + operation_type = "Source" + + source_settings { + data_sources = [ + "sensors/temperature/+", + "sensors/humidity/+", + "sensors/pressure/+" + ] + endpoint_ref = "mqtt-broker" + asset_ref = "factory-sensors" + schema_ref = "sensor-schema" + serialization_format = "Json" + } + } + + # Transformation operation + operations { + name = "data-processing" + operation_type = "BuiltInTransformation" + + built_in_transformation_settings { + schema_ref = "processed-schema" + serialization_format = "Json" + + # Filter out invalid readings + filter { + description = "Remove readings with invalid temperature values" + expression = "temperature >= -50 && temperature <= 150" + inputs = ["temperature"] + type = "Filter" + } + + # Filter out test devices + filter { + description = "Exclude test devices from production data" + expression = "!deviceId.startsWith('test-')" + inputs = ["deviceId"] + type = "Filter" + } + + # Convert temperature units + map { + description = "Convert Celsius to Fahrenheit" + expression = "(temperature * 9/5) + 32" + inputs = ["temperature"] + output = "temperatureF" + type = "Compute" + } + + # Add timestamp + map { + description = "Add processing timestamp" + expression = "now()" + inputs = [] + output = "processedAt" + type = "BuiltInFunction" + } + + # Rename fields + map { + description = "Standardize field names" + inputs = ["deviceId"] + output = "device_identifier" + type = "Rename" + } + + # Pass 
through existing fields + map { + description = "Keep humidity as-is" + inputs = ["humidity"] + output = "humidity" + type = "PassThrough" + } + + # Add new calculated properties + map { + description = "Add device metadata" + expression = "{'location': location, 'type': 'environmental_sensor'}" + inputs = ["location"] + output = "metadata" + type = "NewProperties" + } + + # Create datasets for different consumers + datasets { + key = "alerts" + description = "High-priority sensor alerts" + expression = "temperature > 100 || humidity > 90" + inputs = ["temperature", "humidity", "device_identifier"] + schema_ref = "alert-schema" + } + + datasets { + key = "metrics" + description = "Aggregated sensor metrics" + expression = "{'avg_temp': avg(temperature), 'max_humidity': max(humidity)}" + inputs = ["temperature", "humidity"] + schema_ref = "metrics-schema" + } + } + } + + # Destination operation + operations { + name = "cloud-storage" + operation_type = "Destination" + + destination_settings { + data_destination = "processed-sensor-data" + endpoint_ref = "adls-endpoint" + } + } +} +``` + +### Multi-Source Analytics Pipeline + +```hcl +resource "azurerm_iotoperations_dataflow" "analytics_pipeline" { + name = "analytics-pipeline" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.example.name + mode = "Enabled" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + # Manufacturing line data source + operations { + name = "manufacturing-source" + operation_type = "Source" + + source_settings { + data_sources = [ + "factory/line1/machines/+/telemetry", + "factory/line1/machines/+/status", + "factory/line1/quality/+" + ] + endpoint_ref = "factory-mqtt" + asset_ref = "production-line-1" + serialization_format = "Json" + } + } + + # Quality control data source + 
operations { + name = "quality-source" + operation_type = "Source" + + source_settings { + data_sources = [ + "quality/inspections/+", + "quality/defects/+", + "quality/reports/+" + ] + endpoint_ref = "quality-mqtt" + asset_ref = "quality-systems" + serialization_format = "Json" + } + } + + # Advanced transformation + operations { + name = "advanced-analytics" + operation_type = "BuiltInTransformation" + + built_in_transformation_settings { + serialization_format = "Parquet" + + # Filter for production hours only + filter { + description = "Include only production shift data" + expression = "hour(timestamp) >= 6 && hour(timestamp) <= 22" + inputs = ["timestamp"] + type = "Filter" + } + + # Filter out maintenance modes + filter { + description = "Exclude machines in maintenance mode" + expression = "machineStatus != 'maintenance'" + inputs = ["machineStatus"] + type = "Filter" + } + + # Calculate efficiency metrics + map { + description = "Calculate Overall Equipment Effectiveness (OEE)" + expression = "(actualOutput / plannedOutput) * (operatingTime / plannedTime) * (goodParts / totalParts)" + inputs = ["actualOutput", "plannedOutput", "operatingTime", "plannedTime", "goodParts", "totalParts"] + output = "oee_score" + type = "Compute" + } + + # Categorize performance + map { + description = "Categorize machine performance" + expression = "oee_score >= 0.85 ? 'excellent' : oee_score >= 0.65 ? 'good' : oee_score >= 0.40 ? 
'fair' : 'poor'" + inputs = ["oee_score"] + output = "performance_category" + type = "Compute" + } + + # Add analytics metadata + map { + description = "Add analytics processing metadata" + expression = "{'pipeline_version': '2.1', 'processed_by': 'advanced-analytics'}" + inputs = [] + output = "analytics_metadata" + type = "NewProperties" + } + + # Normalize machine IDs + map { + description = "Standardize machine identifiers" + expression = "upper(replace(machineId, '-', '_'))" + inputs = ["machineId"] + output = "normalized_machine_id" + type = "Compute" + } + + # Create performance dataset + datasets { + key = "performance_summary" + description = "Machine performance summary for dashboards" + expression = "{'machine': normalized_machine_id, 'oee': oee_score, 'category': performance_category, 'shift': shift}" + inputs = ["normalized_machine_id", "oee_score", "performance_category", "shift"] + schema_ref = "performance-schema" + } + + # Create quality dataset + datasets { + key = "quality_metrics" + description = "Quality control metrics for analysis" + expression = "{'defect_rate': defectCount / totalParts, 'quality_grade': qualityScore}" + inputs = ["defectCount", "totalParts", "qualityScore"] + schema_ref = "quality-schema" + } + + # Create alerts dataset + datasets { + key = "production_alerts" + description = "Production issues requiring immediate attention" + expression = "oee_score < 0.40 || defectCount > 10" + inputs = ["oee_score", "defectCount", "normalized_machine_id"] + schema_ref = "alert-schema" + } + } + } + + # Data lake destination + operations { + name = "datalake-analytics" + operation_type = "Destination" + + destination_settings { + data_destination = "manufacturing-analytics" + endpoint_ref = "datalake-endpoint" + } + } +} +``` + +### Real-time Streaming Pipeline + +```hcl +resource "azurerm_iotoperations_dataflow" "streaming_pipeline" { + name = "streaming-pipeline" + resource_group_name = azurerm_resource_group.example.name + 
instance_name = azurerm_iotoperations_instance.example.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.example.name + mode = "Enabled" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + # High-frequency sensor data + operations { + name = "streaming-source" + operation_type = "Source" + + source_settings { + data_sources = [ + "realtime/vibration/+", + "realtime/temperature/+", + "realtime/pressure/+" + ] + endpoint_ref = "edge-mqtt" + serialization_format = "Json" + } + } + + # Real-time processing + operations { + name = "realtime-transform" + operation_type = "BuiltInTransformation" + + built_in_transformation_settings { + serialization_format = "Delta" + + # Filter for anomalous readings + filter { + description = "Detect vibration anomalies" + expression = "vibrationLevel > threshold * 1.5" + inputs = ["vibrationLevel", "threshold"] + type = "Filter" + } + + # Calculate moving averages + map { + description = "Calculate 5-minute moving average" + expression = "avg(temperature, 300)" # 5-minute window + inputs = ["temperature"] + output = "temperature_5min_avg" + type = "BuiltInFunction" + } + + # Detect trends + map { + description = "Detect increasing temperature trend" + expression = "slope(temperature, 600) > 0.1" # 10-minute trend + inputs = ["temperature"] + output = "temperature_rising" + type = "BuiltInFunction" + } + + # Add severity levels + map { + description = "Assign alert severity" + expression = "vibrationLevel > criticalThreshold ? 'critical' : vibrationLevel > warningThreshold ? 
'warning' : 'info'" + inputs = ["vibrationLevel", "criticalThreshold", "warningThreshold"] + output = "alert_severity" + type = "Compute" + } + + # Create streaming datasets + datasets { + key = "critical_alerts" + description = "Critical condition alerts for immediate response" + expression = "alert_severity == 'critical' || temperature_rising == true" + inputs = ["alert_severity", "temperature_rising", "deviceId", "timestamp"] + } + + datasets { + key = "trend_analysis" + description = "Trend data for predictive maintenance" + expression = "{'device': deviceId, 'trend': slope(temperature, 3600), 'variance': variance(vibrationLevel, 1800)}" + inputs = ["deviceId", "temperature", "vibrationLevel"] + } + } + } + + # Real-time destination + operations { + name = "realtime-output" + operation_type = "Destination" + + destination_settings { + data_destination = "realtime-alerts" + endpoint_ref = "stream-analytics" + } + } +} +``` + +### Multi-Format Data Integration + +```hcl +resource "azurerm_iotoperations_dataflow" "integration_pipeline" { + name = "integration-pipeline" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + dataflow_profile_name = azurerm_iotoperations_dataflow_profile.example.name + mode = "Enabled" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + # JSON data source + operations { + name = "json-source" + operation_type = "Source" + + source_settings { + data_sources = [ + "legacy/systems/+/data" + ] + endpoint_ref = "legacy-endpoint" + schema_ref = "legacy-json-schema" + serialization_format = "Json" + } + } + + # Format transformation and standardization + operations { + name = "format-standardization" + operation_type = "BuiltInTransformation" + + built_in_transformation_settings { + schema_ref = "unified-schema" + serialization_format = "Parquet" + + # Standardize timestamps + map { + description 
= "Convert various timestamp formats to ISO 8601" + expression = "parseTimestamp(timestamp_field, timestamp_format)" + inputs = ["timestamp_field", "timestamp_format"] + output = "standardized_timestamp" + type = "BuiltInFunction" + } + + # Normalize measurement units + map { + description = "Convert all temperatures to Celsius" + expression = "unit == 'F' ? (value - 32) * 5/9 : value" + inputs = ["value", "unit"] + output = "temperature_celsius" + type = "Compute" + } + + # Standardize device identifiers + map { + description = "Create unified device ID format" + expression = "concat(location, '_', deviceType, '_', serialNumber)" + inputs = ["location", "deviceType", "serialNumber"] + output = "unified_device_id" + type = "BuiltInFunction" + } + + # Data quality scoring + map { + description = "Calculate data quality score" + expression = "(timestamp_valid ? 25 : 0) + (value_in_range ? 25 : 0) + (device_id_valid ? 25 : 0) + (schema_compliant ? 25 : 0)" + inputs = ["timestamp_valid", "value_in_range", "device_id_valid", "schema_compliant"] + output = "data_quality_score" + type = "Compute" + } + + # Filter high-quality data + filter { + description = "Include only high-quality data records" + expression = "data_quality_score >= 75" + inputs = ["data_quality_score"] + type = "Filter" + } + + # Create integration datasets + datasets { + key = "master_data" + description = "Master dataset with all standardized records" + inputs = ["unified_device_id", "standardized_timestamp", "temperature_celsius", "data_quality_score"] + schema_ref = "master-data-schema" + } + + datasets { + key = "data_quality_report" + description = "Data quality metrics for monitoring" + expression = "{'avg_quality': avg(data_quality_score), 'record_count': count(), 'processing_time': now()}" + inputs = ["data_quality_score"] + schema_ref = "quality-report-schema" + } + } + } + + # Unified destination + operations { + name = "unified-destination" + operation_type = "Destination" + + 
destination_settings { + data_destination = "enterprise-data-hub" + endpoint_ref = "enterprise-endpoint" + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Dataflow. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Dataflow should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `dataflow_profile_name` - (Required) The name of the Dataflow Profile that this Dataflow belongs to. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `mode` - (Optional) The operational mode of the dataflow. Possible values are `Enabled` and `Disabled`. Defaults to `Enabled`. + +* `operations` - (Required) A list of `operations` blocks as defined below. At least one operation must be configured. + +* `extended_location` - (Required) An `extended_location` block as defined below. Changing this forces a new resource to be created. + +--- + +An `extended_location` block supports the following: + +* `name` - (Required) The extended location name where the IoT Operations Dataflow should be deployed. + +* `type` - (Required) The extended location type. Must be `CustomLocation`. + +--- + +An `operations` block supports the following: + +* `operation_type` - (Required) The type of operation. Possible values are `Source`, `Destination`, and `BuiltInTransformation`. 
+ +* `name` - (Optional) The name of the operation. Must be between 1-63 characters. + +* `source_settings` - (Optional) A `source_settings` block as defined below. Required when `operation_type` is `Source`. + +* `destination_settings` - (Optional) A `destination_settings` block as defined below. Required when `operation_type` is `Destination`. + +* `built_in_transformation_settings` - (Optional) A `built_in_transformation_settings` block as defined below. Required when `operation_type` is `BuiltInTransformation`. + +--- + +A `source_settings` block supports the following: + +* `data_sources` - (Required) List of data source identifiers. At least one data source must be specified. Must be between 1-253 characters each. + +* `endpoint_ref` - (Required) Reference to the endpoint configuration. Must be between 1-253 characters. + +* `asset_ref` - (Optional) Reference to the asset configuration. Must be between 1-253 characters. + +* `schema_ref` - (Optional) Reference to the schema definition. Must be between 1-253 characters. + +* `serialization_format` - (Optional) The data serialization format. Currently only `Json` is supported. + +--- + +A `destination_settings` block supports the following: + +* `data_destination` - (Required) The data destination identifier. Must be between 1-253 characters. + +* `endpoint_ref` - (Required) Reference to the endpoint configuration. Must be between 1-253 characters. + +--- + +A `built_in_transformation_settings` block supports the following: + +* `schema_ref` - (Optional) Reference to the output schema definition. Must be between 1-253 characters. + +* `serialization_format` - (Optional) The output serialization format. Possible values are `Delta`, `Json`, and `Parquet`. + +* `datasets` - (Optional) A list of `datasets` blocks as defined below for creating named datasets. + +* `filter` - (Optional) A list of `filter` blocks as defined below for data filtering operations. 
+ +* `map` - (Optional) A list of `map` blocks as defined below for data transformation operations. + +--- + +A `datasets` block supports the following: + +* `key` - (Required) The unique key identifier for the dataset. Must be between 1-253 characters. + +* `inputs` - (Required) List of input field names for the dataset. + +* `description` - (Optional) Description of the dataset. Must be between 1-500 characters. + +* `expression` - (Optional) Expression for creating the dataset. Must be between 1-1000 characters. + +* `schema_ref` - (Optional) Reference to the dataset schema definition. Must be between 1-253 characters. + +--- + +A `filter` block supports the following: + +* `expression` - (Required) The filter expression. Must be between 1-1000 characters. + +* `inputs` - (Required) List of input field names for the filter. + +* `description` - (Optional) Description of the filter operation. Must be between 1-500 characters. + +* `type` - (Optional) The type of filter. Currently only `Filter` is supported. + +--- + +A `map` block supports the following: + +* `inputs` - (Required) List of input field names for the mapping operation. + +* `output` - (Required) The output field name. Must be between 1-253 characters. + +* `description` - (Optional) Description of the mapping operation. Must be between 1-500 characters. + +* `expression` - (Optional) The transformation expression. Must be between 1-1000 characters. + +* `type` - (Optional) The type of mapping operation. Possible values are `BuiltInFunction`, `Compute`, `NewProperties`, `PassThrough`, and `Rename`. + +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Dataflow. + +* `provisioning_state` - The provisioning state of the Dataflow. 
+ +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Dataflow. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Dataflow. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Dataflow. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Dataflow. + +## Import + +An IoT Operations Dataflow can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_iotoperations_dataflow.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/dataflowProfiles/profile1/dataflows/dataflow1 +``` diff --git a/website/docs/r/iotoperations_dataflowendpoint.html.markdown b/website/docs/r/iotoperations_dataflowendpoint.html.markdown new file mode 100644 index 000000000000..de90ffc41013 --- /dev/null +++ b/website/docs/r/iotoperations_dataflowendpoint.html.markdown @@ -0,0 +1,599 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_dataflow_endpoint" +description: |- + Manages an Azure IoT Operations Dataflow Endpoint. +--- + +# azurerm_iotoperations_dataflow_endpoint + +Manages an Azure IoT Operations Dataflow Endpoint. + +A Dataflow Endpoint defines connectivity and authentication settings for data sources and destinations used in IoT Operations dataflows. It supports multiple endpoint types including MQTT brokers, Kafka clusters, Azure Data Explorer, Data Lake Storage, Fabric OneLake, and local storage with various authentication methods and configuration options. 
+ +## Example Usage + +### MQTT Endpoint with TLS + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} + +resource "azurerm_iotoperations_dataflow_endpoint" "mqtt_endpoint" { + name = "mqtt-broker-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "Mqtt" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + mqtt_settings { + host = "broker.example.com:8883" + protocol = "Mqtt" + keep_alive_seconds = 60 + session_expiry_seconds = 3600 + max_inflight_messages = 100 + qos = 1 + retain = "Keep" + client_id_prefix = "iot-dataflow" + + tls_settings { + mode = "Enabled" + trusted_ca_certificate_config_map_ref = "mqtt-ca-certs" + } + + authentication { + method = "X509Certificate" + + x509_certificate_settings { + secret_ref = "mqtt-client-cert" + } + } + } +} +``` + +### Kafka Endpoint with SASL Authentication + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "kafka_endpoint" { + name = "kafka-cluster-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "Kafka" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + kafka_settings { + host = "kafka-cluster.example.com:9092" + consumer_group_id = "iot-consumer-group" + compression = "Gzip" + partition_strategy = "Static" + kafka_acks = "All" + copy_mqtt_properties = "Enabled" + + batching { + mode = "Enabled" + 
latency_ms = 1000 + max_bytes = 1048576 # 1MB + max_messages = 1000 + } + + tls_settings { + mode = "Enabled" + trusted_ca_certificate_config_map_ref = "kafka-ca-certs" + } + + authentication { + method = "Sasl" + + sasl_settings { + sasl_type = "ScramSha256" + secret_ref = "kafka-credentials" + } + } + } +} +``` + +### Azure Data Explorer Endpoint with Managed Identity + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "adx_endpoint" { + name = "data-explorer-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "DataExplorer" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + data_explorer_settings { + host = "https://mycluster.westeurope.kusto.windows.net" + database = "IotTelemetry" + + batching { + latency_seconds = 60 + max_messages = 10000 + } + + authentication { + method = "SystemAssignedManagedIdentity" + + system_assigned_managed_identity_settings { + audience = "https://management.azure.com/" + } + } + } +} +``` + +### Data Lake Storage Endpoint with User-Assigned Managed Identity + +```hcl +resource "azurerm_user_assigned_identity" "example" { + name = "dataflow-identity" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location +} + +resource "azurerm_iotoperations_dataflow_endpoint" "adls_endpoint" { + name = "data-lake-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "DataLakeStorage" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + data_lake_storage_settings { + host = "https://mystorage.dfs.core.windows.net" + + batching { + latency_seconds = 300 # 5 minutes + max_messages = 50000 + } + + authentication { + method = 
"UserAssignedManagedIdentity" + + user_assigned_managed_identity_settings { + client_id = azurerm_user_assigned_identity.example.client_id + audience = "https://storage.azure.com/" + } + } + } +} +``` + +### Fabric OneLake Endpoint + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "fabric_endpoint" { + name = "fabric-onelake-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "FabricOneLake" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + fabric_one_lake_settings { + host = "https://onelake.dfs.fabric.microsoft.com" + one_lake_path_type = "Tables" + workspace = "manufacturing-workspace" + names = ["sensor-data-lakehouse", "analytics-lakehouse"] + + batching { + latency_seconds = 120 + max_messages = 25000 + } + + authentication { + method = "SystemAssignedManagedIdentity" + + system_assigned_managed_identity_settings { + audience = "https://storage.azure.com/" + } + } + } +} +``` + +### Local Storage Endpoint + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "local_storage_endpoint" { + name = "local-storage-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "LocalStorage" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + local_storage_settings { + path = "/mnt/data/iot-operations" + } +} +``` + +### Multi-Protocol MQTT Endpoint with Service Account Token + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "websocket_mqtt_endpoint" { + name = "websocket-mqtt-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "Mqtt" + + extended_location { + name = 
azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + mqtt_settings { + host = "wss://mqtt.example.com:443" + protocol = "WebSockets" + keep_alive_seconds = 30 + session_expiry_seconds = 7200 + max_inflight_messages = 50 + qos = 2 + retain = "Never" + client_id_prefix = "websocket-client" + cloud_event_attributes = "Propagate" + + authentication { + method = "ServiceAccountToken" + + service_account_token_settings { + audience = "iotoperations.azure.com" + } + } + } +} +``` + +### Enterprise Kafka with Advanced Configuration + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "enterprise_kafka" { + name = "enterprise-kafka-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "Kafka" + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + kafka_settings { + host = "kafka.enterprise.com:9093" + consumer_group_id = "enterprise-analytics" + compression = "Lz4" + partition_strategy = "Topic" + kafka_acks = "Leader" + copy_mqtt_properties = "Disabled" + cloud_event_attributes = "CreateOrRemap" + + batching { + mode = "Enabled" + latency_ms = 500 + max_bytes = 2097152 # 2MB + max_messages = 5000 + } + + tls_settings { + mode = "Enabled" + trusted_ca_certificate_config_map_ref = "enterprise-ca-bundle" + } + + authentication { + method = "UserAssignedManagedIdentity" + + user_assigned_managed_identity_settings { + client_id = azurerm_user_assigned_identity.example.client_id + audience = "https://eventhubs.azure.net/" + } + } + } +} +``` + +### Data Lake with Access Token Authentication + +```hcl +resource "azurerm_iotoperations_dataflow_endpoint" "adls_token_endpoint" { + name = "adls-token-endpoint" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + endpoint_type = "DataLakeStorage" 
+ + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + data_lake_storage_settings { + host = "https://analytics.dfs.core.windows.net" + + batching { + latency_seconds = 180 + max_messages = 75000 + } + + authentication { + method = "AccessToken" + + access_token_settings { + secret_ref = "adls-access-token" + } + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Dataflow Endpoint. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Dataflow Endpoint should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `endpoint_type` - (Required) The type of the endpoint. Possible values are `DataExplorer`, `DataLakeStorage`, `FabricOneLake`, `Kafka`, `LocalStorage`, and `Mqtt`. Changing this forces a new resource to be created. + +* `extended_location` - (Required) An `extended_location` block as defined below. Changing this forces a new resource to be created. + +* `data_explorer_settings` - (Optional) A `data_explorer_settings` block as defined below. Required when `endpoint_type` is `DataExplorer`. Cannot be used with other endpoint settings. + +* `data_lake_storage_settings` - (Optional) A `data_lake_storage_settings` block as defined below. Required when `endpoint_type` is `DataLakeStorage`. Cannot be used with other endpoint settings. + +* `fabric_one_lake_settings` - (Optional) A `fabric_one_lake_settings` block as defined below. 
Required when `endpoint_type` is `FabricOneLake`. Cannot be used with other endpoint settings. + +* `kafka_settings` - (Optional) A `kafka_settings` block as defined below. Required when `endpoint_type` is `Kafka`. Cannot be used with other endpoint settings. + +* `local_storage_settings` - (Optional) A `local_storage_settings` block as defined below. Required when `endpoint_type` is `LocalStorage`. Cannot be used with other endpoint settings. + +* `mqtt_settings` - (Optional) An `mqtt_settings` block as defined below. Required when `endpoint_type` is `Mqtt`. Cannot be used with other endpoint settings. + +--- + +An `extended_location` block supports the following: + +* `name` - (Required) The extended location name where the IoT Operations Dataflow Endpoint should be deployed. + +* `type` - (Required) The extended location type. Must be `CustomLocation`. + +--- + +A `data_explorer_settings` block supports the following: + +* `host` - (Required) The Data Explorer cluster URL. Must be between 1-253 characters. + +* `database` - (Required) The Data Explorer database name. Must be between 1-253 characters. + +* `authentication` - (Required) An `authentication` block as defined below for Data Explorer authentication. + +* `batching` - (Optional) A `batching` block as defined below for batching configuration. + +--- + +A `data_lake_storage_settings` block supports the following: + +* `host` - (Required) The Data Lake Storage account URL. Must be between 1-253 characters. + +* `authentication` - (Required) An `authentication` block as defined below for Data Lake Storage authentication. + +* `batching` - (Optional) A `batching` block as defined below for batching configuration. + +--- + +A `fabric_one_lake_settings` block supports the following: + +* `host` - (Required) The Fabric OneLake host URL. Must be between 1-253 characters. + +* `workspace` - (Required) The Fabric workspace name. Must be between 1-253 characters. 
+ +* `names` - (Required) List of lakehouse names to connect to. Each name must be between 1-253 characters. + +* `one_lake_path_type` - (Required) The OneLake path type. Possible values are `Files` and `Tables`. + +* `authentication` - (Required) An `authentication` block as defined below for Fabric OneLake authentication. + +* `batching` - (Optional) A `batching` block as defined below for batching configuration. + +--- + +A `kafka_settings` block supports the following: + +* `host` - (Required) The Kafka broker connection string. Must be between 1-253 characters. + +* `authentication` - (Required) An `authentication` block as defined below for Kafka authentication. + +* `consumer_group_id` - (Optional) The Kafka consumer group ID. Must be between 1-253 characters. + +* `compression` - (Optional) The compression type for Kafka messages. Possible values are `None`, `Gzip`, `Snappy`, and `Lz4`. + +* `partition_strategy` - (Optional) The partitioning strategy for Kafka messages. + +* `kafka_acks` - (Optional) The acknowledgment level for Kafka producers. + +* `copy_mqtt_properties` - (Optional) Whether to copy MQTT properties to Kafka headers. + +* `cloud_event_attributes` - (Optional) How to handle CloudEvent attributes. + +* `batching` - (Optional) A `kafka_batching` block as defined below for Kafka-specific batching configuration. + +* `tls_settings` - (Optional) A `tls_settings` block as defined below for TLS configuration. + +--- + +A `local_storage_settings` block supports the following: + +* `path` - (Required) The local storage path. Must be between 1-1000 characters. + +--- + +An `mqtt_settings` block supports the following: + +* `host` - (Required) The MQTT broker connection string. Must be between 1-253 characters. + +* `authentication` - (Required) An `authentication` block as defined below for MQTT authentication. + +* `protocol` - (Optional) The MQTT protocol type. Possible values are `Mqtt` and `WebSockets`. 
+ +* `keep_alive_seconds` - (Optional) The MQTT keep-alive interval in seconds. Must be between 1-65535. + +* `session_expiry_seconds` - (Optional) The MQTT session expiry interval in seconds. Must be between 1-4294967295. + +* `max_inflight_messages` - (Optional) The maximum number of in-flight MQTT messages. Must be between 1-65535. + +* `qos` - (Optional) The Quality of Service level for MQTT messages. Must be between 0-2. + +* `retain` - (Optional) The MQTT retain policy. Possible values are `Keep` and `Never`. + +* `client_id_prefix` - (Optional) The prefix for MQTT client IDs. Must be between 1-253 characters. + +* `cloud_event_attributes` - (Optional) How to handle CloudEvent attributes in MQTT messages. + +* `tls_settings` - (Optional) A `tls_settings` block as defined below for TLS configuration. + +--- + +A `batching` block supports the following: + +* `latency_seconds` - (Optional) The maximum latency in seconds before sending a batch. Must be between 1-3600. + +* `max_messages` - (Optional) The maximum number of messages in a batch. Must be between 1-1000000. + +--- + +A `kafka_batching` block supports the following: + +* `mode` - (Optional) Whether batching is enabled. Possible values are `Enabled` and `Disabled`. + +* `latency_ms` - (Optional) The maximum latency in milliseconds before sending a batch. Must be between 0-3600000. + +* `max_bytes` - (Optional) The maximum size of a batch in bytes. Must be between 1-1073741824. + +* `max_messages` - (Optional) The maximum number of messages in a batch. Must be between 1-1000000. + +--- + +A `tls_settings` block supports the following: + +* `mode` - (Required) Whether TLS is enabled. Possible values are `Enabled` and `Disabled`. + +* `trusted_ca_certificate_config_map_ref` - (Optional) Reference to the ConfigMap containing trusted CA certificates. Must be between 1-253 characters. + +--- + +An `authentication` block supports the following: + +* `method` - (Required) The authentication method. 
Possible values are `SystemAssignedManagedIdentity`, `UserAssignedManagedIdentity`, `ServiceAccountToken`, `X509Certificate`, `AccessToken`, and `Sasl`. + +* `system_assigned_managed_identity_settings` - (Optional) A `system_assigned_managed_identity_settings` block as defined below. Required when `method` is `SystemAssignedManagedIdentity`. + +* `user_assigned_managed_identity_settings` - (Optional) A `user_assigned_managed_identity_settings` block as defined below. Required when `method` is `UserAssignedManagedIdentity`. + +* `service_account_token_settings` - (Optional) A `service_account_token_settings` block as defined below. Required when `method` is `ServiceAccountToken`. + +* `x509_certificate_settings` - (Optional) An `x509_certificate_settings` block as defined below. Required when `method` is `X509Certificate`. + +* `access_token_settings` - (Optional) An `access_token_settings` block as defined below. Required when `method` is `AccessToken`. + +* `sasl_settings` - (Optional) A `sasl_settings` block as defined below. Required when `method` is `Sasl`. + +--- + +A `system_assigned_managed_identity_settings` block supports the following: + +* `audience` - (Required) The audience for the managed identity token. Must be between 1-253 characters. + +--- + +A `user_assigned_managed_identity_settings` block supports the following: + +* `client_id` - (Required) The client ID of the user-assigned managed identity. + +* `audience` - (Required) The audience for the managed identity token. Must be between 1-253 characters. + +--- + +A `service_account_token_settings` block supports the following: + +* `audience` - (Required) The audience for the service account token. Must be between 1-253 characters. + +--- + +An `x509_certificate_settings` block supports the following: + +* `secret_ref` - (Required) Reference to the Kubernetes secret containing the X.509 certificate. Must be between 1-253 characters. 
+ +--- + +An `access_token_settings` block supports the following: + +* `secret_ref` - (Required) Reference to the Kubernetes secret containing the access token. Must be between 1-253 characters. + +--- + +A `sasl_settings` block supports the following: + +* `sasl_type` - (Required) The SASL mechanism type. Possible values are `Plain`, `ScramSha256`, and `ScramSha512`. + +* `secret_ref` - (Required) Reference to the Kubernetes secret containing SASL credentials. Must be between 1-253 characters. + +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Dataflow Endpoint. + +* `provisioning_state` - The provisioning state of the Dataflow Endpoint. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Dataflow Endpoint. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Dataflow Endpoint. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Dataflow Endpoint. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Dataflow Endpoint. + +## Import + +An IoT Operations Dataflow Endpoint can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_iotoperations_dataflow_endpoint.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/dataflowEndpoints/endpoint1 +``` diff --git a/website/docs/r/iotoperations_dataflowprofile.html.markdown b/website/docs/r/iotoperations_dataflowprofile.html.markdown new file mode 100644 index 000000000000..8412e308ca2d --- /dev/null +++ b/website/docs/r/iotoperations_dataflowprofile.html.markdown @@ -0,0 +1,370 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_dataflow_profile" +description: |- + Manages an Azure IoT Operations Dataflow Profile. +--- + +# azurerm_iotoperations_dataflow_profile + +Manages an Azure IoT Operations Dataflow Profile. + +A Dataflow Profile defines the runtime configuration and scaling parameters for dataflow instances in IoT Operations. It controls how many dataflow instances run, their logging levels, and monitoring capabilities, providing a template for dataflow execution environments. 
+
+## Example Usage
+
+### Basic Dataflow Profile
+
+```hcl
+resource "azurerm_resource_group" "example" {
+  name     = "example-resources"
+  location = "West Europe"
+}
+
+resource "azurerm_iotoperations_instance" "example" {
+  name                   = "example-instance"
+  resource_group_name    = azurerm_resource_group.example.name
+  location               = azurerm_resource_group.example.location
+  schema_registry_ref    = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/example-registry"
+  extended_location_name = "microsoftiotoperations"
+  extended_location_type = "CustomLocation"
+}
+
+resource "azurerm_iotoperations_dataflow_profile" "example" {
+  name                = "example-profile"
+  resource_group_name = azurerm_resource_group.example.name
+  instance_name       = azurerm_iotoperations_instance.example.name
+
+  extended_location {
+    name = azurerm_iotoperations_instance.example.extended_location_name
+    type = "CustomLocation"
+  }
+}
+```
+
+### High-Scale Dataflow Profile
+
+```hcl
+resource "azurerm_iotoperations_dataflow_profile" "high_scale" {
+  name                = "high-scale-profile"
+  resource_group_name = azurerm_resource_group.example.name
+  instance_name       = azurerm_iotoperations_instance.example.name
+  instance_count      = 10
+
+  extended_location {
+    name = azurerm_iotoperations_instance.example.extended_location_name
+    type = "CustomLocation"
+  }
+
+  diagnostics {
+    logs {
+      level = "info"
+    }
+
+    metrics {
+      prometheus_port = 9090
+    }
+  }
+}
+```
+
+### Development Dataflow Profile with Debug Logging
+
+```hcl
+resource "azurerm_iotoperations_dataflow_profile" "development" {
+  name                = "dev-profile"
+  resource_group_name = azurerm_resource_group.example.name
+  instance_name       = azurerm_iotoperations_instance.example.name
+  instance_count      = 2
+
+  extended_location {
+    name = azurerm_iotoperations_instance.example.extended_location_name
+    type = "CustomLocation"
+  }
+
+  diagnostics {
+    logs {
+      level = "debug"
+    }
+
+    metrics {
+      prometheus_port = 9091
+    }
+  }
+}
+```
+
+### Production Dataflow Profile with Minimal Logging
+
+```hcl
+resource "azurerm_iotoperations_dataflow_profile" "production" {
+  name                = 
"prod-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 50 + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "error" + } + + metrics { + prometheus_port = 9092 + } + } +} +``` + +### Edge Computing Dataflow Profile + +```hcl +resource "azurerm_iotoperations_dataflow_profile" "edge_computing" { + name = "edge-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 3 + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "warn" + } + + metrics { + prometheus_port = 9093 + } + } +} +``` + +### Monitoring-Focused Profile + +```hcl +resource "azurerm_iotoperations_dataflow_profile" "monitoring" { + name = "monitoring-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 5 + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "trace" + } + + metrics { + prometheus_port = 9094 + } + } +} +``` + +### Multi-Environment Setup + +```hcl +# Development environment +resource "azurerm_iotoperations_dataflow_profile" "dev_environment" { + name = "dev-env-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 1 + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "debug" + } + + metrics { + prometheus_port = 9100 + } + } +} + +# Staging environment +resource 
"azurerm_iotoperations_dataflow_profile" "staging_environment" { + name = "staging-env-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 5 + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "info" + } + + metrics { + prometheus_port = 9101 + } + } +} + +# Production environment +resource "azurerm_iotoperations_dataflow_profile" "production_environment" { + name = "prod-env-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 25 + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "warn" + } + + metrics { + prometheus_port = 9102 + } + } +} +``` + +### High-Throughput Profile for Analytics Workloads + +```hcl +resource "azurerm_iotoperations_dataflow_profile" "analytics_workload" { + name = "analytics-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 100 # Maximum scaling for high throughput + + extended_location { + name = azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "error" # Minimal logging for performance + } + + metrics { + prometheus_port = 9200 # Dedicated metrics port + } + } +} +``` + +### Resource-Constrained Profile for Edge Devices + +```hcl +resource "azurerm_iotoperations_dataflow_profile" "edge_constrained" { + name = "edge-constrained-profile" + resource_group_name = azurerm_resource_group.example.name + instance_name = azurerm_iotoperations_instance.example.name + instance_count = 1 # Minimal instances for resource constraints + + extended_location { + name = 
azurerm_iotoperations_instance.example.extended_location_name + type = "CustomLocation" + } + + diagnostics { + logs { + level = "error" # Only critical errors to save resources + } + + # No metrics configuration to save resources + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Dataflow Profile. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Dataflow Profile should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `instance_name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `extended_location` - (Required) An `extended_location` block as defined below. Changing this forces a new resource to be created. + +* `instance_count` - (Optional) The number of dataflow instances to run. Must be between 1-1000. This determines the parallel processing capacity and scaling of dataflow operations. + +* `diagnostics` - (Optional) A `diagnostics` block as defined below for configuring logging and monitoring. + +--- + +An `extended_location` block supports the following: + +* `name` - (Required) The extended location name where the IoT Operations Dataflow Profile should be deployed. + +* `type` - (Required) The extended location type. Must be `CustomLocation`. + +--- + +A `diagnostics` block supports the following: + +* `logs` - (Optional) A `logs` block as defined below for logging configuration. + +* `metrics` - (Optional) A `metrics` block as defined below for metrics collection configuration. 
+ +--- + +A `logs` block supports the following: + +* `level` - (Optional) The logging level for dataflow operations. Possible values are `trace`, `debug`, `info`, `warn`, and `error`. + - `trace`: Most detailed logging, includes all operations and data flow tracing + - `debug`: Detailed debugging information for troubleshooting + - `info`: General operational information and status updates + - `warn`: Warning messages about potential issues + - `error`: Only error conditions and failures + +--- + +A `metrics` block supports the following: + +* `prometheus_port` - (Optional) The port number for Prometheus metrics endpoint. Must be between 1-65535. This enables monitoring and observability of dataflow performance and health. + +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Dataflow Profile. + +* `provisioning_state` - The provisioning state of the Dataflow Profile. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Dataflow Profile. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Dataflow Profile. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Dataflow Profile. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Dataflow Profile. + +## Import + +An IoT Operations Dataflow Profile can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_iotoperations_dataflow_profile.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1/dataflowProfiles/profile1 +``` diff --git a/website/docs/r/iotoperations_instance.html.markdown b/website/docs/r/iotoperations_instance.html.markdown new file mode 100644 index 000000000000..5de34e6eff3c --- /dev/null +++ b/website/docs/r/iotoperations_instance.html.markdown @@ -0,0 +1,296 @@ +--- +subcategory: "IoT Operations" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_iotoperations_instance" +description: |- + Manages an Azure IoT Operations Instance. +--- + +# azurerm_iotoperations_instance + +Manages an Azure IoT Operations Instance. + +An IoT Operations Instance is the core management resource that provides orchestration and runtime capabilities for IoT workloads. It serves as the foundational platform for deploying brokers, dataflows, and other IoT components, managing schema registries, and coordinating edge computing operations. 
+ +## Example Usage + +### Basic IoT Operations Instance + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_iotoperations_instance" "example" { + name = "example-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/example-registry" + extended_location_name = "microsoftiotoperations" + extended_location_type = "CustomLocation" +} +``` + +### IoT Operations Instance with Description and Version + +```hcl +resource "azurerm_iotoperations_instance" "production" { + name = "production-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/prod-registry" + extended_location_name = "production-edge-cluster" + extended_location_type = "CustomLocation" + + description = "Production IoT Operations instance for manufacturing edge computing" + version = "1.0.0" + + tags = { + Environment = "Production" + Department = "Manufacturing" + CostCenter = "CC-1234" + } +} +``` + +### Development Environment Instance + +```hcl +resource "azurerm_iotoperations_instance" "development" { + name = "dev-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/dev-registry" + extended_location_name = "development-edge-cluster" + extended_location_type = "CustomLocation" + + description = "Development environment for IoT 
Operations testing and validation" + + tags = { + Environment = "Development" + Team = "IoT-Development" + Project = "EdgeComputing" + } +} +``` + +### Multi-Region Manufacturing Setup + +```hcl +# Primary manufacturing site +resource "azurerm_iotoperations_instance" "manufacturing_primary" { + name = "mfg-primary-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/manufacturing-registry" + extended_location_name = "primary-manufacturing-site" + extended_location_type = "CustomLocation" + + description = "Primary manufacturing site IoT Operations instance" + version = "2.0.0" + + tags = { + Environment = "Production" + Site = "Primary" + Region = "WestEurope" + Application = "Manufacturing" + } +} + +# Secondary manufacturing site +resource "azurerm_iotoperations_instance" "manufacturing_secondary" { + name = "mfg-secondary-instance" + resource_group_name = azurerm_resource_group.example.name + location = "East US" + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/manufacturing-registry" + extended_location_name = "secondary-manufacturing-site" + extended_location_type = "CustomLocation" + + description = "Secondary manufacturing site IoT Operations instance" + version = "2.0.0" + + tags = { + Environment = "Production" + Site = "Secondary" + Region = "EastUS" + Application = "Manufacturing" + } +} +``` + +### Edge Computing Instance for Retail + +```hcl +resource "azurerm_iotoperations_instance" "retail_edge" { + name = "retail-edge-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/retail-registry" + extended_location_name = "retail-store-cluster" + extended_location_type = "CustomLocation" + + description = "Retail edge computing instance for in-store IoT operations" + + tags = { + Environment = "Production" + Industry = "Retail" + StoreType = "Flagship" + Location = "Downtown" + } +} +``` + +### Smart City Infrastructure Instance + +```hcl +resource "azurerm_iotoperations_instance" "smart_city" { + name = "smartcity-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/smartcity-registry" + extended_location_name = "city-infrastructure-cluster" + extended_location_type = "CustomLocation" + + description = "Smart city infrastructure management and monitoring instance" + version = "3.0.0" + + tags = { + Environment = "Production" + Application = "SmartCity" + Municipality = "CityName" + Department = "PublicWorks" + Compliance = "SOC2" + } +} +``` + +### Healthcare IoT Instance + +```hcl +resource "azurerm_iotoperations_instance" "healthcare" { + name = "healthcare-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/healthcare-registry" + extended_location_name = "hospital-edge-cluster" + extended_location_type = "CustomLocation" + + description = "Healthcare IoT Operations for medical device management and patient monitoring" + + tags = { + Environment = "Production" + Industry = "Healthcare" + Compliance = "HIPAA" + Facility = "HospitalMain" + 
Department = "IT" + } +} +``` + +### Energy Management Instance + +```hcl +resource "azurerm_iotoperations_instance" "energy_management" { + name = "energy-mgmt-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/energy-registry" + extended_location_name = "power-plant-cluster" + extended_location_type = "CustomLocation" + + description = "Energy management and grid monitoring IoT Operations instance" + version = "1.5.0" + + tags = { + Environment = "Production" + Industry = "Energy" + FacilityType = "PowerPlant" + GridZone = "Zone-A" + Criticality = "High" + } +} +``` + +### Multi-Tenant SaaS Instance + +```hcl +resource "azurerm_iotoperations_instance" "saas_platform" { + name = "saas-platform-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/saas-registry" + extended_location_name = "saas-edge-cluster" + extended_location_type = "CustomLocation" + + description = "Multi-tenant SaaS platform IoT Operations instance" + + tags = { + Environment = "Production" + ServiceType = "SaaS" + Tier = "Premium" + Scaling = "Auto" + Monitoring = "24x7" + } +} +``` + +### Minimal Configuration for Testing + +```hcl +resource "azurerm_iotoperations_instance" "minimal" { + name = "test-instance" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + schema_registry_ref = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/Microsoft.DeviceRegistry/schemaRegistries/test-registry" + + # Minimal configuration without 
extended location for basic testing +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name of the IoT Operations Instance. Must be between 3-63 characters, lowercase alphanumeric with dashes, starting and ending with alphanumeric characters. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the IoT Operations Instance should exist. Must be between 1-90 characters. Changing this forces a new resource to be created. + +* `location` - (Required) The Azure Region where the IoT Operations Instance should exist. Changing this forces a new resource to be created. + +* `schema_registry_ref` - (Required) The resource ID reference to the Device Registry Schema Registry that this IoT Operations Instance will use for schema management. This registry stores and manages schemas for data validation and transformation. Changing this forces a new resource to be created. + +* `description` - (Optional) A description of the IoT Operations Instance. Use this to document the purpose, environment, or specific use case of the instance. + +* `version` - (Optional) The version of the IoT Operations Instance. If not specified, Azure will assign a default version and may update it during the resource lifecycle. + +* `extended_location_name` - (Optional) The name of the extended location where the IoT Operations Instance should be deployed. This is typically a Custom Location representing an edge cluster or on-premises infrastructure. Changing this forces a new resource to be created. + +* `extended_location_type` - (Optional) The type of the extended location. Must be `CustomLocation` when specified. Changing this forces a new resource to be created. + +* `tags` - (Optional) A mapping of tags to assign to the IoT Operations Instance. 
+ +## Attributes Reference + +In addition to the Arguments listed above, the following Attributes are exported: + +* `id` - The ID of the IoT Operations Instance. + +* `provisioning_state` - The provisioning state of the IoT Operations Instance. Possible values include `Succeeded`, `Failed`, `Canceled`, `Creating`, `Updating`, and `Deleting`. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the IoT Operations Instance. +* `read` - (Defaults to 5 minutes) Used when retrieving the IoT Operations Instance. +* `update` - (Defaults to 30 minutes) Used when updating the IoT Operations Instance. +* `delete` - (Defaults to 30 minutes) Used when deleting the IoT Operations Instance. + +## Import + +An IoT Operations Instance can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_iotoperations_instance.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.IoTOperations/instances/instance1 +```