diff --git a/azure/examples/enterprise/.terraform.lock.hcl b/azure/examples/enterprise/.terraform.lock.hcl new file mode 100644 index 00000000..df92477c --- /dev/null +++ b/azure/examples/enterprise/.terraform.lock.hcl @@ -0,0 +1,220 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/alekc/kubectl" { + version = "2.1.5" + constraints = "~> 2.0" + hashes = [ + "h1:ZMqwfwUL49Nvp5LZims1nt7zdE2Lj9rtVHkRobYWBCs=", + "zh:11d6c7e429d013ce4b224a8e778fa1e272743dd131696039c65f8238d0c5afee", + "zh:39e55dc62ee6c31032e0ccd4f4d87d28f62ecf2025b726b705327b91235bb5d7", + "zh:3ef4cc118f1e9586bf794dcc9d0ed7912815df182239c28111fa2d67d4d9f264", + "zh:4183684cbd5a12c5b716b5d349178ac7cedda277ed0bc5cca2a03e33c0c6ff2d", + "zh:79e2d64ad8035a4254d40bc0f5064c3bb3a3d38e4e6d969c810f6512a50106f7", + "zh:91961239727bd01fd145752d5f42877bf8b8087a095ab68c39e3068941cdc09c", + "zh:a94b91e527d68a85aa5ab020c27d383bfa4aa61bf585248f3591808c8e29841d", + "zh:b0cdb266cc46d58ef3558f86e94d3d3718c6bb1281f2357c50bd7d2b6828e11a", + "zh:c6b12ca20041ed7b3cb86ff15cf69f8c6c4ccfe94640b8f45386ebcf26adfc31", + "zh:d7424e9d3511af81174549716c71da175529e439525fa44746b69ca514ab9022", + "zh:decb856ecf18a5a51775953233d3203ad2468456a6a28a4ff5243ed2d9a3241e", + "zh:e2b176e5cd860ea2559965d7bab34ccb6f2edcc6c8b92c1d96853ac1368f0415", + "zh:f780ee23ce21fb674d57f60320a284cdaf40d3baba4e3f8a274ad4601c989317", + ] +} + +provider "registry.terraform.io/azure/azapi" { + version = "2.8.0" + constraints = "~> 2.0, ~> 2.4, ~> 2.5" + hashes = [ + "h1:gJpLLWwcTgHPyyqDNaXZkCfFCAywGMAgVlqg7QHRRBk=", + "zh:048fa67ba123c6da65a7af12453328e36e1783cac1dbecc905d44ee7a1daa41c", + "zh:08dfb8c493a99aa54ea0c00f5d2e2389aac55d70b31bfc50a38e4ab61800aca8", + "zh:0d5bf53f356864567bf0855eb90b0b9aa4619b60fd1469210461ad88c0508a6f", + "zh:221cc52181d81bd741e8624ba9619ae20438f7a13828b72aa138a51b57bc1483", + 
"zh:51e7485e4f502cbbefe9b4ea991961eb9b19f41862593150905197bbb37cc6fb", + "zh:6e2d0986176bbeabdfa7dc3d1bf37d0a24549ebff29a3c9e8c5082e03cc38247", + "zh:87e46ceddcd3a4b7ed16f6b853c286840753d8af8ae8df0618ab5f29e950976b", + "zh:894998419943fadb3b85d1469665e9b7cdf492e6dc30907a77e32043e1d52b6a", + "zh:9f1efae3ad37510d947e7a27118a84bae55e35681b047d939781da96dd6ab6c7", + "zh:a201371f6c4c65b6976a8a360223c188ea91b7a33078fdd3a5f5f0ac7b438d35", + "zh:af3cc16bdfc545e61ce66449b9daaebfaa0c5e495777241c9414671a31e37ffa", + "zh:dbbb263a5f4c40624823fd3e68dc046b1f00325548393557384f0914a4694278", + ] +} + +provider "registry.terraform.io/azure/modtm" { + version = "0.3.5" + constraints = "~> 0.3" + hashes = [ + "h1:RmCHYU3U3jDGYruN3Q7PiQqwqg7U4WP3dUDbx1PsyQ4=", + "zh:02a54109f2bd30a089a0681eaba8ef9d30b0402a51795597ee7b067f04952417", + "zh:0a15492a7257a0979d1f1d501168d1a38ec8c65b11d89d9423349f143d7b7e67", + "zh:4ae1d114aec1625f192eb2055eb7301774a8f79340085fbbe7c2d11284ba4cb7", + "zh:599201c19e82a227f0739be2150779e42903ba0aa147e96ef219c7f32f926053", + "zh:747b1189e679cd7cf77f76fd09609db0ac1ef7189ec3c64accd37af7d0ebe449", + "zh:859bc8739ceb9049e7cd98284f22eb9d503cc5b80f9452ee28a518080ebf3903", + "zh:8f97c0876b30967b47dfd63546f3843368bc3bc90e98bb42bd33c00ffe2d0b2c", + "zh:91183bbea386e6013d0b2a3b1d36a7bfe1595d45f4ee1f4f693d6254d017d334", + "zh:ae16303a74c83e0d8f4413d568eaf04c3c0d2b07250dbd7ae07bffae01197f36", + "zh:db155386bb65a7fd5569b7d3331de65a259638e8e1c8f8896db969f4599504a9", + "zh:e39e6089c8a17a4b26b59c95050bd0e19fc0a09a14314cfa139053269b6d5f8d", + "zh:ec880b514fc3bd8d07e5d66a0c528fd6d83ae62d6588df4939b1f6ea509f0b24", + ] +} + +provider "registry.terraform.io/hashicorp/azuread" { + version = "3.8.0" + constraints = ">= 2.45.0" + hashes = [ + "h1:E2YWNE3Qry4bQMlmmZ33X4hLY5hOGrEZrlRg4anI2uw=", + "zh:0d26cfbf9417acd1c2295ccd5b0052abeac85ad1c3f6422ff09bf6a1ce16f00d", + "zh:144d4ea92fed541a6376bc76ad65ba4738dfd7bcab4c9d6cc20d35001338d06d", + 
"zh:1c3e89cf19118fc07d7b04257251fc9897e722c16e0a0df7b07fcd261f8c12e7", + "zh:2061d2cb64d8167d0af37e6610d0dc051977bd1ccc0e5cdd5ab02525ee239f96", + "zh:75562fdc3b313b7e538907199bfa588a1fcbc40113b0f3b7bfb496fbc358a32f", + "zh:78bef022ae9b1b0c636b7dd32bcda13fb273023f3a888cc005f3aa20e365b417", + "zh:ad8dbee59843154f8e93b24db9939a4257d13c7c86331eb93f1691294bc4e31f", + "zh:b3d83d7ac57073631704336a188cb746c473f728fc7ccb76abecb520e83fdf65", + "zh:c0bf9e0be73843de9089597be2720e4093b3ba320fbad99ab86da47681e77949", + "zh:c9a4c27d2b0800d3f4ece19d66c1fa574f7cd4ff66277af8f120d65e8f03f48e", + "zh:cd9ad8c848e17d9824045c33132cc0e87aa4d58cdb7bee6c0f6c3f9bc27892d5", + "zh:fbcafc21cdd19451274b905f9ee8a5b758ca63cc231e7544c815642e4a399c6d", + ] +} + +provider "registry.terraform.io/hashicorp/azurerm" { + version = "4.54.0" + constraints = ">= 3.75.0, ~> 4.0, 4.54.0" + hashes = [ + "h1:AeE+jsY9HfzMrTLjQZZ8IWtI/XxqBxbd3BRDSbGU2oM=", + "zh:0adda2cfb2ae9ec394943164cbd5ab1f1fac89a0125ad3966a97363b06b1bd11", + "zh:23dcc71a1586c2b8644476ccd3b4d4d22aa651d6ceb03d32f801bb7ecb09c84f", + "zh:4573833c692a87df167e3adf71c4291879e1a5d2e430ba5255509d3510c7a2f5", + "zh:49132e138bb28b02aa36a00fdcfcf818c4a6d150e3b5148e4d910efac5aaf1bf", + "zh:5dda12ad7f69f91847b99365f66b8dfb1d6ea913d2d06fadbabcea236cc1b346", + "zh:6e45c59dbc54c56c1255f4bb45db15a2ec75dcb2a9125adfa812a667132b332a", + "zh:76802f69f1fa8e894e9c96d6f7098698d1f9c036f30b46a40207fce5ed373ef0", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:846e7222bdeee0150830d82cd2f09619e2239347eba1d05f0409c78a684502d8", + "zh:8822918829f89354ab65b1d588d3185191bbd81e3479510dcbec801d3e3617b0", + "zh:901074c726047a141e256e3229f3e55a5dd4033fec57f889c0118b71e818331b", + "zh:a240979f94f50d2f6ceda2651e5146652468f312f03691f0949876524d160a9d", + ] +} + +provider "registry.terraform.io/hashicorp/external" { + version = "2.3.5" + constraints = ">= 2.3.4" + hashes = [ + "h1:FnUk98MI5nOh3VJ16cHf8mchQLewLfN1qZG/MqNgPrI=", + 
"zh:6e89509d056091266532fa64de8c06950010498adf9070bf6ff85bc485a82562", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:86868aec05b58dc0aa1904646a2c26b9367d69b890c9ad70c33c0d3aa7b1485a", + "zh:a2ce38fda83a62fa5fb5a70e6ca8453b168575feb3459fa39803f6f40bd42154", + "zh:a6c72798f4a9a36d1d1433c0372006cc9b904e8cfd60a2ae03ac5b7d2abd2398", + "zh:a8a3141d2fc71c86bf7f3c13b0b3be8a1b0f0144a47572a15af4dfafc051e28a", + "zh:aa20a1242eb97445ad26ebcfb9babf2cd675bdb81cac5f989268ebefa4ef278c", + "zh:b58a22445fb8804e933dcf835ab06c29a0f33148dce61316814783ee7f4e4332", + "zh:cb5626a661ee761e0576defb2a2d75230a3244799d380864f3089c66e99d0dcc", + "zh:d1acb00d20445f682c4e705c965e5220530209c95609194c2dc39324f3d4fcce", + "zh:d91a254ba77b69a29d8eae8ed0e9367cbf0ea6ac1a85b58e190f8cb096a40871", + "zh:f6592327673c9f85cdb6f20336faef240abae7621b834f189c4a62276ea5db41", + ] +} + +provider "registry.terraform.io/hashicorp/helm" { + version = "2.17.0" + constraints = "~> 2.0, >= 2.5.0" + hashes = [ + "h1:kQMkcPVvHOguOqnxoEU2sm1ND9vCHiT8TvZ2x6v/Rsw=", + "zh:06fb4e9932f0afc1904d2279e6e99353c2ddac0d765305ce90519af410706bd4", + "zh:104eccfc781fc868da3c7fec4385ad14ed183eb985c96331a1a937ac79c2d1a7", + "zh:129345c82359837bb3f0070ce4891ec232697052f7d5ccf61d43d818912cf5f3", + "zh:3956187ec239f4045975b35e8c30741f701aa494c386aaa04ebabffe7749f81c", + "zh:66a9686d92a6b3ec43de3ca3fde60ef3d89fb76259ed3313ca4eb9bb8c13b7dd", + "zh:88644260090aa621e7e8083585c468c8dd5e09a3c01a432fb05da5c4623af940", + "zh:a248f650d174a883b32c5b94f9e725f4057e623b00f171936dcdcc840fad0b3e", + "zh:aa498c1f1ab93be5c8fbf6d48af51dc6ef0f10b2ea88d67bcb9f02d1d80d3930", + "zh:bf01e0f2ec2468c53596e027d376532a2d30feb72b0b5b810334d043109ae32f", + "zh:c46fa84cc8388e5ca87eb575a534ebcf68819c5a5724142998b487cb11246654", + "zh:d0c0f15ffc115c0965cbfe5c81f18c2e114113e7a1e6829f6bfd879ce5744fbb", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + 
version = "2.38.0" + constraints = "~> 2.0, >= 2.10.0" + hashes = [ + "h1:soK8Lt0SZ6dB+HsypFRDzuX/npqlMU6M0fvyaR1yW0k=", + "zh:0af928d776eb269b192dc0ea0f8a3f0f5ec117224cd644bdacdc682300f84ba0", + "zh:1be998e67206f7cfc4ffe77c01a09ac91ce725de0abaec9030b22c0a832af44f", + "zh:326803fe5946023687d603f6f1bab24de7af3d426b01d20e51d4e6fbe4e7ec1b", + "zh:4a99ec8d91193af961de1abb1f824be73df07489301d62e6141a656b3ebfff12", + "zh:5136e51765d6a0b9e4dbcc3b38821e9736bd2136cf15e9aac11668f22db117d2", + "zh:63fab47349852d7802fb032e4f2b6a101ee1ce34b62557a9ad0f0f0f5b6ecfdc", + "zh:924fb0257e2d03e03e2bfe9c7b99aa73c195b1f19412ca09960001bee3c50d15", + "zh:b63a0be5e233f8f6727c56bed3b61eb9456ca7a8bb29539fba0837f1badf1396", + "zh:d39861aa21077f1bc899bc53e7233262e530ba8a3a2d737449b100daeb303e4d", + "zh:de0805e10ebe4c83ce3b728a67f6b0f9d18be32b25146aa89116634df5145ad4", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:faf23e45f0090eef8ba28a8aac7ec5d4fdf11a36c40a8d286304567d71c1e7db", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.2.4" + constraints = "~> 3.0" + hashes = [ + "h1:L5V05xwp/Gto1leRryuesxjMfgZwjb7oool4WS1UEFQ=", + "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43", + "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a", + "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991", + "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f", + "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e", + "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615", + "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442", + "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5", + "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f", + 
"zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.8.1" + constraints = ">= 3.0.0, ~> 3.0, ~> 3.1, >= 3.5.0, ~> 3.5" + hashes = [ + "h1:u8AKlWVDTH5r9YLSeswoVEjiY72Rt4/ch7U+61ZDkiQ=", + "zh:08dd03b918c7b55713026037c5400c48af5b9f468f483463321bd18e17b907b4", + "zh:0eee654a5542dc1d41920bbf2419032d6f0d5625b03bd81339e5b33394a3e0ae", + "zh:229665ddf060aa0ed315597908483eee5b818a17d09b6417a0f52fd9405c4f57", + "zh:2469d2e48f28076254a2a3fc327f184914566d9e40c5780b8d96ebf7205f8bc0", + "zh:37d7eb334d9561f335e748280f5535a384a88675af9a9eac439d4cfd663bcb66", + "zh:741101426a2f2c52dee37122f0f4a2f2d6af6d852cb1db634480a86398fa3511", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:a902473f08ef8df62cfe6116bd6c157070a93f66622384300de235a533e9d4a9", + "zh:b85c511a23e57a2147355932b3b6dce2a11e856b941165793a0c3d7578d94d05", + "zh:c5172226d18eaac95b1daac80172287b69d4ce32750c82ad77fa0768be4ea4b8", + "zh:dab4434dba34aad569b0bc243c2d3f3ff86dd7740def373f2a49816bd2ff819b", + "zh:f49fd62aa8c5525a5c17abd51e27ca5e213881d58882fd42fec4a545b53c9699", + ] +} + +provider "registry.terraform.io/isometry/deepmerge" { + version = "1.2.1" + constraints = "~> 1.0" + hashes = [ + "h1:+GBRWyzNYKj47qmSBcV28lrIIgk3Gusj48maI+jrL0Q=", + "zh:13be4c31971addc10e26a003e22b8867dba41737ffbc9de86ed84555c4a539b7", + "zh:320a939a594c8a2563f4c11108e02428e7fda3bd51a4fc2298089299cf23f516", + "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f", + "zh:9407fe1f9d332ecbd5252faa5b04f62b7fa47f19efe35a9e92d30ef2c603db45", + "zh:a0af6ad4d4a52a1355df5b73fe38e406f50d9fd5c8af165929a602f906de0ff7", + "zh:a44628f6e9bc612dbdcb013a8cf33aca3893f1697c4492847859f7db5b59c4dd", + "zh:abaaa7d71f7975505824adcebed320aae43fc7e13c901ffc51448f6eb59585fe", + "zh:ad2b1b3f348ad478ee64d294a85835625e560324e58a826530b13f49fafa9bac", + "zh:e8bec252634868283e47ef1208cb89b53c1e0cdd2ad804acf91d0ff368048416", + ] +} diff 
--git a/azure/examples/enterprise/README.md b/azure/examples/enterprise/README.md new file mode 100644 index 00000000..b9f5b246 --- /dev/null +++ b/azure/examples/enterprise/README.md @@ -0,0 +1,131 @@ +# Example: Enterprise Materialize Deployment on Azure with Ory (OIDC/SAML) + +This example extends the [simple deployment](../simple/) with **Ory Kratos** (identity management) and **Ory Hydra** (OAuth2/OIDC provider) for enterprise authentication via OIDC and SAML. + +--- + +## What Gets Created + +Everything from the [simple example](../simple/README.md), plus: + +### Ory Database +- **Azure PostgreSQL Flexible Server** (separate instance from Materialize): Version 15 +- **SKU**: B_Standard_B1ms (burstable, suitable for Ory workloads) +- **Databases**: `kratos` and `hydra` on the same server +- **Network Access**: Private only, same subnet as the Materialize database + +### Ory Kratos (Identity Management) +- **Helm release**: Deployed in the `ory` namespace +- **Replicas**: 2 (with PodDisruptionBudget) +- **Resources**: 250m CPU request / 256Mi memory (request & limit) +- **Purpose**: Manages user identities, login/registration flows, supports OIDC and SAML providers + +### Ory Hydra (OAuth2 & OIDC Provider) +- **Helm release**: Deployed in the `ory` namespace (shared with Kratos) +- **Replicas**: 2 (with PodDisruptionBudget) +- **Resources**: 250m CPU request / 256Mi memory (request & limit) +- **Maester**: Enabled (CRD controller for managing OAuth2 clients via Kubernetes resources) +- **Purpose**: Issues OAuth2 tokens, provides OIDC discovery endpoint, delegates login/consent to Kratos + +--- + +## Getting Started + +### Step 1: Set Required Variables + +Create a `terraform.tfvars` file: + +```hcl +subscription_id = "12345678-1234-1234-1234-123456789012" +resource_group_name = "materialize-enterprise-rg" +name_prefix = "enterprise-demo" +location = "westus2" +license_key = "your-materialize-license-key" +ory_issuer_url = "https://auth.example.com/" +tags = { + 
environment = "demo" +} +``` + +**Required Variables:** +- `subscription_id`: Azure subscription ID +- `resource_group_name`: Name for the resource group (will be created) +- `name_prefix`: Prefix for all resource names +- `location`: Azure region for deployment +- `tags`: Map of tags to apply to resources +- `license_key`: Materialize license key +- `ory_issuer_url`: The public URL where Hydra's OIDC discovery will be available (e.g., `https://auth.example.com/`) + +**Optional Variables:** +- `k8s_apiserver_authorized_networks`: List of authorized IP ranges for AKS API server access (defaults to `["0.0.0.0/0"]`) +- `ingress_cidr_blocks`: List of CIDR blocks allowed to reach the Load Balancer (defaults to `["0.0.0.0/0"]`) +- `internal_load_balancer`: Whether to use an internal load balancer (defaults to `true`) +- `enable_observability`: Enable Prometheus and Grafana monitoring stack (defaults to `false`) + +### Step 2: Deploy + +```bash +terraform init +terraform apply +``` + +### Step 3: Verify Ory Deployment + +```bash +# Check Ory pods +kubectl get pods -n ory + +# Check Kratos health +kubectl port-forward svc/kratos-admin 4434:4434 -n ory +curl http://localhost:4434/health/ready + +# Check Hydra health +kubectl port-forward svc/hydra-admin 4445:4445 -n ory +curl http://localhost:4445/health/ready +``` + +--- + +## Architecture + +``` +┌─────────────────────────────────────────────────────────┐ +│ AKS Cluster │ +│ │ +│ ┌─────────────────────┐ ┌──────────────────────────┐ │ +│ │ Generic Nodes │ │ Materialize Nodes │ │ +│ │ │ │ (tainted) │ │ +│ │ ┌───────────────┐ │ │ ┌────────────────────┐ │ │ +│ │ │ Ory Kratos │ │ │ │ Materialize │ │ │ +│ │ │ (identity) │ │ │ │ Instance │ │ │ +│ │ ├───────────────┤ │ │ └────────────────────┘ │ │ +│ │ │ Ory Hydra │ │ │ │ │ +│ │ │ (OAuth2) │ │ │ │ │ +│ │ ├───────────────┤ │ │ │ │ +│ │ │ Operator │ │ │ │ │ +│ │ │ cert-manager │ │ │ │ │ +│ │ └───────────────┘ │ │ │ │ +│ └─────────────────────┘ └──────────────────────────┘ │ 
+└─────────────────────────────────────────────────────────┘ + │ │ + ┌────┴────┐ ┌─────┴─────┐ + │ Ory DB │ │ MZ DB │ + │ (B1ms) │ │ (D2s_v3) │ + │ kratos │ │materialize│ + │ hydra │ │ │ + └─────────┘ └───────────┘ +``` + +--- + +## Notes + +- Ory Kratos and Hydra share a namespace (`ory`) but use separate databases on the same Postgres instance +- The Ory Postgres instance uses a smaller SKU (`B_Standard_B1ms`) since Ory workloads are lightweight +- Both Ory components are scheduled on generic nodes (not the Materialize-dedicated node pool) +- For production, configure identity schemas for Kratos and register OAuth2 clients in Hydra via the `helm_values` override or Hydra Maester CRDs +- Don't forget to destroy resources when finished: + +```bash +terraform destroy +``` diff --git a/azure/examples/enterprise/main.tf b/azure/examples/enterprise/main.tf new file mode 100644 index 00000000..64bdf7dd --- /dev/null +++ b/azure/examples/enterprise/main.tf @@ -0,0 +1,642 @@ +provider "azurerm" { + # Set the Azure subscription ID here or use the AZURE_SUBSCRIPTION_ID environment variable + subscription_id = var.subscription_id + + features { + resource_group { + prevent_deletion_if_contains_resources = false + } + key_vault { + purge_soft_delete_on_destroy = true + recover_soft_deleted_key_vaults = false + } + } +} + +provider "kubernetes" { + host = module.aks.cluster_endpoint + client_certificate = base64decode(module.aks.kube_config[0].client_certificate) + client_key = base64decode(module.aks.kube_config[0].client_key) + cluster_ca_certificate = base64decode(module.aks.kube_config[0].cluster_ca_certificate) +} + +provider "helm" { + kubernetes { + host = module.aks.cluster_endpoint + client_certificate = base64decode(module.aks.kube_config[0].client_certificate) + client_key = base64decode(module.aks.kube_config[0].client_key) + cluster_ca_certificate = base64decode(module.aks.kube_config[0].cluster_ca_certificate) + } +} + +provider "kubectl" { + host = 
module.aks.cluster_endpoint + client_certificate = base64decode(module.aks.kube_config[0].client_certificate) + client_key = base64decode(module.aks.kube_config[0].client_key) + cluster_ca_certificate = base64decode(module.aks.kube_config[0].cluster_ca_certificate) + + load_config_file = false +} + + +locals { + vnet_config = { + address_space = "20.0.0.0/16" + aks_subnet_cidr = "20.0.0.0/20" + postgres_subnet_cidr = "20.0.16.0/24" + enable_api_server_vnet_integration = true + api_server_subnet_cidr = "20.0.32.0/27" # keeping atleast 32 IPs reserved for API server and related services used in delegation might reduce it later. + } + + aks_config = { + kubernetes_version = "1.33" + service_cidr = "20.1.0.0/16" + enable_azure_monitor = false + log_analytics_workspace_id = null + } + + node_pool_config = { + vm_size = "Standard_E4pds_v6" + auto_scaling_enabled = true + min_nodes = 2 + max_nodes = 5 + node_count = null + disk_size_gb = 100 + swap_enabled = true + } + + database_config = { + sku_name = "GP_Standard_D2s_v3" + postgres_version = "15" + storage_mb = 32768 + backup_retention_days = 7 + administrator_login = "materialize" + administrator_password = null # Will generate random password + database_name = "materialize" + public_network_access_enabled = false + } + + # Ory database configuration (separate Postgres instance) + ory_database_config = { + sku_name = "B_Standard_B1ms" + postgres_version = "15" + storage_mb = 32768 + backup_retention_days = 7 + administrator_login = "oryadmin" + administrator_password = null # Will generate random password + public_network_access_enabled = false + } + + storage_container_name = "materialize" + + database_statement_timeout = "15min" + + metadata_backend_url = format( + "postgres://%s:%s@%s/%s?sslmode=require&options=-c%%20statement_timeout%%3D%s", + module.database.administrator_login, + urlencode(module.database.administrator_password), + module.database.server_fqdn, + local.database_config.database_name, + 
local.database_statement_timeout + ) + + persist_backend_url = format( + "%s%s", + module.storage.primary_blob_endpoint, + module.storage.container_name, + ) + + materialize_instance_namespace = "materialize-environment" + materialize_instance_name = "main" + + # Common node scheduling configuration + generic_node_labels = { + "workload" = "generic" + } + + materialize_node_labels = { + "workload" = "materialize-instance" + } + + materialize_node_taints = [ + { + key = "materialize.cloud/workload" + value = "materialize-instance" + effect = "NoSchedule" + } + ] + + materialize_tolerations = [ + { + key = "materialize.cloud/workload" + value = "materialize-instance" + operator = "Equal" + effect = "NoSchedule" + } + ] + + # https://learn.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes + storage_class = "managed-csi" + + # Ory database DSNs + ory_kratos_dsn = format( + "postgres://%s:%s@%s/%s?sslmode=require", + module.ory_database.administrator_login, + urlencode(module.ory_database.administrator_password), + module.ory_database.server_fqdn, + "kratos" + ) + + ory_hydra_dsn = format( + "postgres://%s:%s@%s/%s?sslmode=require", + module.ory_database.administrator_login, + urlencode(module.ory_database.administrator_password), + module.ory_database.server_fqdn, + "hydra" + ) +} + + +resource "azurerm_resource_group" "materialize" { + name = var.resource_group_name + location = var.location +} + + +module "networking" { + source = "../../modules/networking" + + resource_group_name = azurerm_resource_group.materialize.name + location = var.location + prefix = var.name_prefix + vnet_address_space = local.vnet_config.address_space + aks_subnet_cidr = local.vnet_config.aks_subnet_cidr + postgres_subnet_cidr = local.vnet_config.postgres_subnet_cidr + enable_api_server_vnet_integration = local.vnet_config.enable_api_server_vnet_integration + api_server_subnet_cidr = local.vnet_config.api_server_subnet_cidr + + tags = var.tags + + depends_on = 
[azurerm_resource_group.materialize] +} + +# AKS Cluster with Default Node Pool +module "aks" { + source = "../../modules/aks" + + resource_group_name = azurerm_resource_group.materialize.name + kubernetes_version = local.aks_config.kubernetes_version + service_cidr = local.aks_config.service_cidr + location = var.location + prefix = var.name_prefix + vnet_name = module.networking.vnet_name + subnet_name = module.networking.aks_subnet_name + subnet_id = module.networking.aks_subnet_id + + enable_api_server_vnet_integration = local.vnet_config.enable_api_server_vnet_integration + k8s_apiserver_authorized_networks = concat(var.k8s_apiserver_authorized_networks, ["${module.networking.nat_gateway_public_ip}/32"]) + api_server_subnet_id = module.networking.api_server_subnet_id + + # Default node pool with autoscaling (runs all workloads except Materialize) + default_node_pool_vm_size = "Standard_D4pds_v6" + default_node_pool_enable_auto_scaling = true + default_node_pool_min_count = 2 + default_node_pool_max_count = 5 + default_node_pool_node_labels = local.generic_node_labels + + # Optional: Enable monitoring + enable_azure_monitor = local.aks_config.enable_azure_monitor + log_analytics_workspace_id = local.aks_config.log_analytics_workspace_id + + tags = var.tags + + depends_on = [azurerm_resource_group.materialize] +} + +# Materialize-dedicated node pool with taints (via labels on Azure) +module "materialize_nodepool" { + source = "../../modules/nodepool" + + prefix = var.name_prefix + cluster_id = module.aks.cluster_id + subnet_id = module.networking.aks_subnet_id + + # Workload-specific configuration + autoscaling_config = { + enabled = local.node_pool_config.auto_scaling_enabled + min_nodes = local.node_pool_config.min_nodes + max_nodes = local.node_pool_config.max_nodes + node_count = local.node_pool_config.node_count + } + + vm_size = local.node_pool_config.vm_size + disk_size_gb = local.node_pool_config.disk_size_gb + swap_enabled = 
local.node_pool_config.swap_enabled + + labels = local.materialize_node_labels + + # Materialize-specific taint to isolate workloads + # https://github.com/Azure/AKS/issues/2934 + # Note: Once applied, these cannot be manually removed due to AKS webhook restrictions + node_taints = local.materialize_node_taints + + tags = var.tags + + depends_on = [azurerm_resource_group.materialize] +} + + +module "database" { + source = "../../modules/database" + + depends_on = [module.networking] + + # Database configuration using new structure + databases = [ + { + name = local.database_config.database_name + charset = "UTF8" + collation = "en_US.utf8" + } + ] + + # Administrator configuration + administrator_login = local.database_config.administrator_login + + # Infrastructure configuration + resource_group_name = azurerm_resource_group.materialize.name + location = var.location + prefix = var.name_prefix + subnet_id = module.networking.postgres_subnet_id + private_dns_zone_id = module.networking.private_dns_zone_id + + # Database server configuration + sku_name = local.database_config.sku_name + postgres_version = local.database_config.postgres_version + storage_mb = local.database_config.storage_mb + backup_retention_days = local.database_config.backup_retention_days + public_network_access_enabled = local.database_config.public_network_access_enabled + + tags = var.tags +} + +# Separate Postgres instance for Ory (Kratos + Hydra) +module "ory_database" { + source = "../../modules/database" + + depends_on = [module.networking] + + databases = [ + { + name = "kratos" + charset = "UTF8" + collation = "en_US.utf8" + }, + { + name = "hydra" + charset = "UTF8" + collation = "en_US.utf8" + } + ] + + administrator_login = local.ory_database_config.administrator_login + + resource_group_name = azurerm_resource_group.materialize.name + location = var.location + prefix = "${var.name_prefix}-ory" + subnet_id = module.networking.postgres_subnet_id + private_dns_zone_id = 
module.networking.private_dns_zone_id + + sku_name = local.ory_database_config.sku_name + postgres_version = local.ory_database_config.postgres_version + storage_mb = local.ory_database_config.storage_mb + backup_retention_days = local.ory_database_config.backup_retention_days + public_network_access_enabled = local.ory_database_config.public_network_access_enabled + + tags = var.tags +} + +# Enable PostgreSQL extensions required by Ory Kratos migrations (pg_trgm + btree_gin for GIN indexes) +resource "azurerm_postgresql_flexible_server_configuration" "ory_extensions" { + name = "azure.extensions" + server_id = module.ory_database.server_id + value = "btree_gin,pg_trgm,uuid-ossp" + + depends_on = [module.ory_database] +} + +module "storage" { + source = "../../modules/storage" + + resource_group_name = azurerm_resource_group.materialize.name + location = var.location + prefix = var.name_prefix + workload_identity_principal_id = module.aks.workload_identity_principal_id + subnets = [module.networking.aks_subnet_id] + container_name = local.storage_container_name + + # Workload identity federation configuration + workload_identity_id = module.aks.workload_identity_id + oidc_issuer_url = module.aks.cluster_oidc_issuer_url + service_account_namespace = local.materialize_instance_namespace + service_account_name = local.materialize_instance_name + + storage_account_tags = var.tags + + depends_on = [azurerm_resource_group.materialize] +} + +resource "random_password" "external_login_password_mz_system" { + length = 16 + special = true + override_special = "!#$%&*()-_=+[]{}<>:?" 
+} + +# Deploy custom CoreDNS with TTL 0 (AKS's coredns doesn't support disabling caching) +module "coredns" { + source = "../../../kubernetes/modules/coredns" + node_selector = local.generic_node_labels + kubeconfig_data = module.aks.kube_config_raw + depends_on = [ + module.aks, + module.networking, + ] +} + +module "cert_manager" { + source = "../../../kubernetes/modules/cert-manager" + + node_selector = local.generic_node_labels + + depends_on = [ + module.aks, + module.networking, + module.coredns, + ] +} + +module "self_signed_cluster_issuer" { + source = "../../../kubernetes/modules/self-signed-cluster-issuer" + + name_prefix = var.name_prefix + + depends_on = [ + module.cert_manager, + ] +} + +module "operator" { + source = "../../modules/operator" + + name_prefix = var.name_prefix + location = var.location + + instance_pod_tolerations = local.materialize_tolerations + instance_node_selector = local.materialize_node_labels + + # node selector for operator and metrics-server workloads + operator_node_selector = local.generic_node_labels + + # Enable Prometheus scrape annotations when observability is enabled + helm_values = var.enable_observability ? { + observability = { + enabled = true + prometheus = { + scrapeAnnotations = { + enabled = true + } + } + } + } : {} + + depends_on = [ + module.aks, + module.database, + module.storage, + module.coredns, + ] +} + +module "prometheus" { + count = var.enable_observability ? 1 : 0 + source = "../../../kubernetes/modules/prometheus" + + namespace = "monitoring" + create_namespace = false # operator creates the "monitoring" namespace + node_selector = local.generic_node_labels + storage_class = local.storage_class + + depends_on = [ + module.operator, + module.aks, + module.coredns, + ] +} + +module "grafana" { + count = var.enable_observability ? 
1 : 0 + source = "../../../kubernetes/modules/grafana" + + namespace = "monitoring" + storage_class = local.storage_class + # operator creates the "monitoring" namespace + create_namespace = false + prometheus_url = module.prometheus[0].prometheus_url + node_selector = local.generic_node_labels + + depends_on = [ + module.prometheus, + ] +} + +module "materialize_instance" { + source = "../../../kubernetes/modules/materialize-instance" + instance_name = local.materialize_instance_name + instance_namespace = local.materialize_instance_namespace + metadata_backend_url = local.metadata_backend_url + persist_backend_url = local.persist_backend_url + + # The password for the external login to the Materialize instance + authenticator_kind = "Password" + external_login_password_mz_system = random_password.external_login_password_mz_system.result + + # Azure workload identity annotations for service account + service_account_annotations = { + "azure.workload.identity/client-id" = module.aks.workload_identity_client_id + } + pod_labels = { + "azure.workload.identity/use" = "true" + } + + license_key = var.license_key + + issuer_ref = { + name = module.self_signed_cluster_issuer.issuer_name + kind = "ClusterIssuer" + } + + # System parameters for the Materialize instance + # See: https://materialize.com/docs/self-managed-deployments/configuration-system-parameters/ + system_parameters = {} + + depends_on = [ + module.aks, + module.database, + module.storage, + module.networking, + module.self_signed_cluster_issuer, + module.operator, + module.materialize_nodepool, + module.coredns, + ] +} + +module "load_balancers" { + source = "../../modules/load_balancers" + + instance_name = local.materialize_instance_name + namespace = local.materialize_instance_namespace + resource_id = module.materialize_instance.instance_resource_id + internal = var.internal_load_balancer + ingress_cidr_blocks = var.internal_load_balancer ? 
null : var.ingress_cidr_blocks + + depends_on = [ + module.materialize_instance, + ] +} + +# ----------------------------------------------------------------------------- +# Ory: Identity & OAuth2 (Kratos + Hydra) +# ----------------------------------------------------------------------------- + +# TODO: Update auth mechanism once Materialize private registry is set up. +resource "kubernetes_namespace" "ory" { + metadata { + name = "ory" + } + + depends_on = [module.aks] +} + +resource "kubernetes_secret" "ory_oel_registry" { + metadata { + name = "ory-oel-registry" + namespace = kubernetes_namespace.ory.metadata[0].name + } + + type = "kubernetes.io/dockerconfigjson" + + data = { + ".dockerconfigjson" = jsonencode({ + auths = { + "europe-docker.pkg.dev" = { + auth = base64encode("_json_key:${file(var.ory_oel_key_file)}") + } + } + }) + } +} + +module "ory_kratos" { + source = "../../../kubernetes/modules/ory-kratos" + + namespace = "ory" + create_namespace = false + dsn = local.ory_kratos_dsn + + # OEL image — registry must be part of repository (Ory Helm chart ignores image.registry) + image_repository = "${var.ory_oel_registry}/ory-enterprise-kratos/kratos-oel" + image_tag = var.ory_oel_image_tag + image_pull_secrets = ["ory-oel-registry"] + + node_selector = local.generic_node_labels + + # Kratos requires at least one identity schema and a default browser return URL + identity_schemas = { + "identity.default.schema.json" = jsonencode({ + "$id" = "https://schemas.ory.sh/presets/kratos/identity.basic.schema.json" + "$schema" = "http://json-schema.org/draft-07/schema#" + title = "Default Identity Schema" + type = "object" + properties = { + traits = { + type = "object" + properties = { + email = { + type = "string" + format = "email" + title = "Email" + "ory.sh/kratos" = { + credentials = { + password = { identifier = true } + } + recovery = { via = "email" } + verification = { via = "email" } + } + } + } + required = ["email"] + } + } + }) + } + + helm_values = { 
+    kratos = {
+      config = {
+        selfservice = {
+          # Where Kratos sends the browser after a self-service flow completes.
+          default_browser_return_url = var.ory_issuer_url
+        }
+        identity = {
+          default_schema_id = "default"
+          # References the schema mounted from identity_schemas above.
+          schemas = [
+            {
+              id  = "default"
+              url = "file:///etc/config/identity.default.schema.json"
+            }
+          ]
+        }
+      }
+    }
+  }
+
+  depends_on = [
+    module.aks,
+    module.ory_database,
+    module.coredns,
+    azurerm_postgresql_flexible_server_configuration.ory_extensions,
+    kubernetes_secret.ory_oel_registry,
+    kubernetes_namespace.ory,
+  ]
+}
+
+module "ory_hydra" {
+  source = "../../../kubernetes/modules/ory-hydra"
+
+  namespace        = "ory"
+  create_namespace = false
+
+  dsn        = local.ory_hydra_dsn
+  issuer_url = var.ory_issuer_url
+
+  # OEL image — registry must be part of repository (Ory Helm chart ignores image.registry)
+  image_repository   = "${var.ory_oel_registry}/ory-enterprise/hydra-oel"
+  image_tag          = var.ory_oel_image_tag
+  image_pull_secrets = ["ory-oel-registry"]
+
+  # Point Hydra login/consent flows to Kratos
+  # NOTE(review): consent_url reuses the Kratos *login* browser-flow URL —
+  # Kratos does not serve an OAuth2 consent UI at /self-service/login/browser.
+  # Presumably a placeholder until a dedicated consent app exists; confirm
+  # before relying on OAuth2 consent prompts in production.
+  login_url   = "${module.ory_kratos.public_url}/self-service/login/browser"
+  consent_url = "${module.ory_kratos.public_url}/self-service/login/browser"
+  logout_url  = "${module.ory_kratos.public_url}/self-service/logout/browser"
+
+  node_selector = local.generic_node_labels
+
+  depends_on = [
+    module.aks,
+    module.ory_database,
+    module.ory_kratos,
+    module.coredns,
+    azurerm_postgresql_flexible_server_configuration.ory_extensions,
+    kubernetes_secret.ory_oel_registry,
+  ]
+}
diff --git a/azure/examples/enterprise/outputs.tf b/azure/examples/enterprise/outputs.tf
new file mode 100644
index 00000000..08b40254
--- /dev/null
+++ b/azure/examples/enterprise/outputs.tf
@@ -0,0 +1,205 @@
+# Networking outputs
+output "networking" {
+  description = "Networking details"
+  value = {
+    vnet_id              = module.networking.vnet_id
+    vnet_name            = module.networking.vnet_name
+    aks_subnet_id        = module.networking.aks_subnet_id
+    api_server_subnet_id = module.networking.api_server_subnet_id
+    postgres_subnet_id   = module.networking.postgres_subnet_id
+
private_dns_zone_id = module.networking.private_dns_zone_id + nat_gateway_id = module.networking.nat_gateway_id + nat_gateway_public_ip = module.networking.nat_gateway_public_ip + vnet_address_space = module.networking.vnet_address_space + } +} + + +# Cluster outputs +output "aks_cluster_name" { + description = "The name of the AKS cluster" + value = module.aks.cluster_name +} + +output "aks_cluster_id" { + description = "The ID of the AKS cluster" + value = module.aks.cluster_id +} + +output "aks_cluster_fqdn" { + description = "The FQDN of the AKS cluster" + value = module.aks.cluster_fqdn +} + +output "aks_cluster_private_fqdn" { + description = "The private FQDN of the AKS cluster" + value = module.aks.cluster_private_fqdn +} + +output "aks_cluster_endpoint" { + description = "The endpoint of the AKS cluster" + value = module.aks.cluster_endpoint + sensitive = true +} + +output "aks_kube_config" { + description = "The kube config of the AKS cluster" + value = module.aks.kube_config + sensitive = true +} + +output "aks_oidc_issuer_url" { + description = "The OIDC issuer URL of the AKS cluster" + value = module.aks.cluster_oidc_issuer_url +} + +output "aks_workload_identity_client_id" { + description = "The client ID of the workload identity" + value = module.aks.workload_identity_client_id +} + +output "materialize_nodepool_name" { + description = "The name of the Materialize node pool" + value = module.materialize_nodepool.nodepool_name +} + +output "materialize_nodepool_id" { + description = "The ID of the Materialize node pool" + value = module.materialize_nodepool.nodepool_id +} + +# Database outputs +output "database_endpoint" { + description = "PostgreSQL server endpoint" + value = module.database.server_fqdn +} + +output "database_name" { + description = "PostgreSQL server name" + value = module.database.server_name +} + +output "database_username" { + description = "PostgreSQL administrator username" + value = module.database.administrator_login + 
sensitive = true +} + +# Storage outputs +output "storage_account_name" { + description = "Name of the storage account" + value = module.storage.storage_account_name +} + +output "storage_primary_blob_endpoint" { + description = "Primary blob endpoint of the storage account" + value = module.storage.primary_blob_endpoint +} + +output "storage_container_name" { + description = "Name of the storage container" + value = module.storage.container_name +} + +# Materialize component outputs +output "operator" { + description = "Materialize operator details" + value = { + namespace = module.operator.operator_namespace + release_name = module.operator.operator_release_name + release_status = module.operator.operator_release_status + } +} + +output "materialize_instance_name" { + description = "Materialize instance name" + value = module.materialize_instance.instance_name +} + +output "materialize_instance_namespace" { + description = "Materialize instance namespace" + value = module.materialize_instance.instance_namespace +} + +output "materialize_instance_resource_id" { + description = "Materialize instance resource ID" + value = module.materialize_instance.instance_resource_id +} + +output "materialize_instance_metadata_backend_url" { + description = "Materialize instance metadata backend URL" + value = module.materialize_instance.metadata_backend_url + sensitive = true +} + +output "materialize_instance_persist_backend_url" { + description = "Materialize instance persist backend URL" + value = module.materialize_instance.persist_backend_url +} + +# Load balancer outputs +output "console_load_balancer_ip" { + description = "IP address of the Materialize console's load balancer." + value = module.load_balancers.console_load_balancer_ip +} + +output "balancerd_load_balancer_ip" { + description = "IP address of the Materialize balancerd's load balancer." 
+ value = module.load_balancers.balancerd_load_balancer_ip +} + +# Azure-specific outputs +output "resource_group_name" { + value = azurerm_resource_group.materialize.name +} + +output "external_login_password_mz_system" { + description = "Password for external login to the Materialize instance" + value = random_password.external_login_password_mz_system.result + sensitive = true +} + +# Observability outputs (only when enabled) +output "prometheus_url" { + description = "Internal URL for Prometheus server" + value = var.enable_observability ? module.prometheus[0].prometheus_url : null +} + +output "grafana_url" { + description = "Internal URL for Grafana" + value = var.enable_observability ? module.grafana[0].grafana_url : null +} + +output "grafana_admin_password" { + description = "`admin` password for Grafana" + value = var.enable_observability ? module.grafana[0].admin_password : null + sensitive = true +} + +# Ory outputs +output "ory_database_endpoint" { + description = "PostgreSQL server endpoint for Ory" + value = module.ory_database.server_fqdn +} + +output "ory_kratos" { + description = "Ory Kratos deployment details" + value = { + public_url = module.ory_kratos.public_url + admin_url = module.ory_kratos.admin_url + namespace = module.ory_kratos.namespace + release_name = module.ory_kratos.release_name + release_status = module.ory_kratos.release_status + } +} + +output "ory_hydra" { + description = "Ory Hydra deployment details" + value = { + public_url = module.ory_hydra.public_url + admin_url = module.ory_hydra.admin_url + namespace = module.ory_hydra.namespace + release_name = module.ory_hydra.release_name + release_status = module.ory_hydra.release_status + } +} diff --git a/azure/examples/enterprise/terraform.tfvars.example b/azure/examples/enterprise/terraform.tfvars.example new file mode 100644 index 00000000..466e94f2 --- /dev/null +++ b/azure/examples/enterprise/terraform.tfvars.example @@ -0,0 +1,10 @@ +subscription_id = 
"12345678-1234-1234-1234-123456789012" +resource_group_name = "materialize-enterprise-rg" +name_prefix = "mz-enterprise" +location = "westus2" +license_key = "your-materialize-license-key" +ory_issuer_url = "https://auth.example.com/" +tags = { + environment = "demo" + project = "materialize-enterprise" +} diff --git a/azure/examples/enterprise/variables.tf b/azure/examples/enterprise/variables.tf new file mode 100644 index 00000000..40224eef --- /dev/null +++ b/azure/examples/enterprise/variables.tf @@ -0,0 +1,100 @@ +variable "subscription_id" { + description = "The ID of the Azure subscription" + type = string +} + +variable "resource_group_name" { + description = "The name of the resource group which will be created." + type = string +} + +variable "location" { + description = "The location of the Azure subscription" + type = string + default = "westus2" +} + +variable "name_prefix" { + description = "The prefix of the Azure subscription" + type = string +} + +variable "tags" { + description = "Tags to apply to resources created." + type = map(string) +} + +variable "ingress_cidr_blocks" { + description = "CIDR blocks that can reach the Azure LoadBalancer frontends." + type = list(string) + default = ["0.0.0.0/0"] + + validation { + condition = alltrue([ + for cidr in var.ingress_cidr_blocks : can(cidrhost(cidr, 0)) + ]) + error_message = "All ingress_cidr_blocks must be valid CIDR notation (e.g., '10.0.0.0/8' or '0.0.0.0/0')." + } +} + +variable "license_key" { + description = "Materialize license key" + type = string + default = null + sensitive = true +} + +variable "k8s_apiserver_authorized_networks" { + description = "List of authorized IP ranges that can access the Kubernetes API server when public access is available. Defaults to ['0.0.0.0/0'] (allow all). 
For production, restrict to specific IPs (e.g., ['203.0.113.0/24'])"
+  type     = list(string)
+  default  = ["0.0.0.0/0"] # Explicit default: allow all IPs
+  nullable = true
+
+  validation {
+    # Terraform's || operator does not short-circuit — both operands are always
+    # evaluated — so `var.x == null || alltrue([for cidr in var.x : ...])`
+    # would still run the for-expression against null and error out. A
+    # conditional expression evaluates only the selected branch, so a null
+    # value passes validation cleanly.
+    condition = (
+      var.k8s_apiserver_authorized_networks == null ? true : alltrue([
+        for cidr in var.k8s_apiserver_authorized_networks :
+        can(cidrhost(cidr, 0))
+      ])
+    )
+    error_message = "All k8s_apiserver_authorized_networks must be valid CIDR blocks (e.g., '203.0.113.0/24')."
+  }
+}
+
+
+variable "internal_load_balancer" {
+  description = "Whether to use an internal load balancer"
+  type        = bool
+  default     = true
+}
+
+variable "enable_observability" {
+  description = "Enable Prometheus and Grafana monitoring stack for Materialize"
+  type        = bool
+  default     = false
+}
+
+# Ory variables
+variable "ory_issuer_url" {
+  description = "The public URL of the OAuth2 issuer (Hydra). Used for OIDC discovery. Example: https://auth.example.com/"
+  type        = string
+}
+
+variable "ory_oel_registry" {
+  description = "Base registry URL for Ory Enterprise License images. Example: europe-docker.pkg.dev/ory-artifacts"
+  type        = string
+}
+
+variable "ory_oel_image_tag" {
+  description = "Image tag for OEL images."
+  type        = string
+  default     = "26.2.3"
+}
+
+# TODO: Update auth mechanism once Materialize private registry is set up.
+# Currently uses a GCP service account key file for Ory's Artifact Registry.
+variable "ory_oel_key_file" {
+  description = "Path to the GCP service account JSON key file for pulling OEL images from Ory's Artifact Registry."
+ type = string +} diff --git a/azure/examples/enterprise/versions.tf b/azure/examples/enterprise/versions.tf new file mode 100644 index 00000000..07aaf9d6 --- /dev/null +++ b/azure/examples/enterprise/versions.tf @@ -0,0 +1,30 @@ +terraform { + required_version = ">= 1.8" + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "4.54.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.0" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.5" + } + deepmerge = { + source = "isometry/deepmerge" + version = "~> 1.0" + } + kubectl = { + source = "alekc/kubectl" + version = "~> 2.0" + } + } +} diff --git a/azure/modules/database/outputs.tf b/azure/modules/database/outputs.tf index 3dfe1851..ab7169b3 100644 --- a/azure/modules/database/outputs.tf +++ b/azure/modules/database/outputs.tf @@ -1,3 +1,8 @@ +output "server_id" { + description = "The ID of the PostgreSQL server" + value = azurerm_postgresql_flexible_server.postgres.id +} + output "server_name" { description = "The name of the PostgreSQL server" value = azurerm_postgresql_flexible_server.postgres.name diff --git a/kubernetes/modules/ory-hydra/main.tf b/kubernetes/modules/ory-hydra/main.tf index 84cad278..a3c25cd6 100644 --- a/kubernetes/modules/ory-hydra/main.tf +++ b/kubernetes/modules/ory-hydra/main.tf @@ -24,6 +24,18 @@ locals { secrets_system = var.secrets_system != null ? var.secrets_system : random_password.secrets_system[0].result secrets_cookie = var.secrets_cookie != null ? var.secrets_cookie : random_password.secrets_cookie[0].result + image_config = var.image_registry != null || var.image_repository != null || var.image_tag != null ? { + image = merge( + var.image_registry != null ? { registry = var.image_registry } : {}, + var.image_repository != null ? { repository = var.image_repository } : {}, + var.image_tag != null ? 
{ tag = var.image_tag } : {}, + ) + } : {} + + image_pull_secrets_config = length(var.image_pull_secrets) > 0 ? { + imagePullSecrets = [for name in var.image_pull_secrets : { name = name }] + } : {} + urls_config = merge( { self = { @@ -35,7 +47,7 @@ locals { var.logout_url != null ? { logout = var.logout_url } : {}, ) - default_helm_values = { + default_helm_values = merge({ replicaCount = var.replica_count secret = { @@ -115,7 +127,7 @@ locals { port = 4445 } } - } + }, local.image_config, local.image_pull_secrets_config) } resource "helm_release" "hydra" { diff --git a/kubernetes/modules/ory-hydra/variables.tf b/kubernetes/modules/ory-hydra/variables.tf index 6084e258..3fd9cd53 100644 --- a/kubernetes/modules/ory-hydra/variables.tf +++ b/kubernetes/modules/ory-hydra/variables.tf @@ -76,7 +76,6 @@ variable "logout_url" { description = "The URL of the logout UI. Example: https://login.example.com/logout" type = string default = null - nullable = false } variable "automigration_enabled" { @@ -164,6 +163,31 @@ variable "maester_enabled" { nullable = false } +variable "image_registry" { + description = "Override the Docker image registry for Hydra. Used for OEL (Ory Enterprise License) deployments. Example: europe-docker.pkg.dev" + type = string + default = null +} + +variable "image_repository" { + description = "Override the Docker image repository for Hydra. Used for OEL deployments. Example: ory-artifacts/ory-enterprise/hydra-oel" + type = string + default = null +} + +variable "image_tag" { + description = "Override the Docker image tag for Hydra. If not set, the chart default will be used." + type = string + default = null +} + +variable "image_pull_secrets" { + description = "List of Kubernetes secret names for pulling images from private registries. Required for OEL deployments." + type = list(string) + default = [] + nullable = false +} + variable "helm_values" { description = "Additional values to pass to the Helm chart. 
These will be deep-merged with the module's default values, with these values taking precedence." type = any diff --git a/kubernetes/modules/ory-kratos/main.tf b/kubernetes/modules/ory-kratos/main.tf index a74b16ba..962113a5 100644 --- a/kubernetes/modules/ory-kratos/main.tf +++ b/kubernetes/modules/ory-kratos/main.tf @@ -35,6 +35,18 @@ locals { identitySchemas = var.identity_schemas } : {} + image_config = var.image_registry != null || var.image_repository != null || var.image_tag != null ? { + image = merge( + var.image_registry != null ? { registry = var.image_registry } : {}, + var.image_repository != null ? { repository = var.image_repository } : {}, + var.image_tag != null ? { tag = var.image_tag } : {}, + ) + } : {} + + image_pull_secrets_config = length(var.image_pull_secrets) > 0 ? { + imagePullSecrets = [for name in var.image_pull_secrets : { name = name }] + } : {} + smtp_config = var.smtp_connection_uri != null ? { courier = { smtp = merge( @@ -45,7 +57,7 @@ locals { } } : {} - default_helm_values = { + default_helm_values = merge({ replicaCount = var.replica_count secret = { @@ -130,7 +142,7 @@ locals { port = 4434 } } - } + }, local.image_config, local.image_pull_secrets_config) } resource "helm_release" "kratos" { diff --git a/kubernetes/modules/ory-kratos/variables.tf b/kubernetes/modules/ory-kratos/variables.tf index 9680e544..fa1e7b7a 100644 --- a/kubernetes/modules/ory-kratos/variables.tf +++ b/kubernetes/modules/ory-kratos/variables.tf @@ -172,6 +172,31 @@ variable "smtp_from_name" { default = null } +variable "image_registry" { + description = "Override the Docker image registry for Kratos. Used for OEL (Ory Enterprise License) deployments. Example: europe-docker.pkg.dev" + type = string + default = null +} + +variable "image_repository" { + description = "Override the Docker image repository for Kratos. Used for OEL deployments. 
Example: ory-artifacts/ory-enterprise-kratos/kratos-oel" + type = string + default = null +} + +variable "image_tag" { + description = "Override the Docker image tag for Kratos. If not set, the chart default will be used." + type = string + default = null +} + +variable "image_pull_secrets" { + description = "List of Kubernetes secret names for pulling images from private registries. Required for OEL deployments." + type = list(string) + default = [] + nullable = false +} + variable "helm_values" { description = "Additional values to pass to the Helm chart. These will be deep-merged with the module's default values, with these values taking precedence." type = any