diff --git a/.devops/deploy-argocd-apps.yml b/.devops/deploy-argocd-apps.yml new file mode 100644 index 0000000..cafb6e8 --- /dev/null +++ b/.devops/deploy-argocd-apps.yml @@ -0,0 +1,255 @@ +pr: none + +trigger: + branches: + include: + - main + paths: + include: + - 'helm/dev/**' + +schedules: +- cron: "12 23 * * *" + displayName: Nightly batch run + branches: + include: + - main + always: true + +parameters: +- name: ENV + displayName: Environment + type: string + default: dev + values: + - dev + - uat + - prod + +- name: APPS_TOP + displayName: Top ArgoCD Apps. Set to [] to skip deployment. + type: object + default: + - p4pa-superset + +- name: APPS_MID + displayName: Mid ArgoCD Apps. Set to [] to skip deployment. + type: object + default: [] + +- name: APPS_EXT + displayName: Extra ArgoCD Apps. Set to [] to skip deployment. + type: object + default: [] + +- name: POSTMAN_BRANCH + displayName: Postman branch + type: string + default: 'develop' + +- name: ARGOCD_TARGET_BRANCH + displayName: ArgoCD target branch or version tag (vX.Y.Z) + type: string + default: 'main' + +- name: TRIGGER_MESSAGE + displayName: Trigger Message + type: string + default: 'none' + +variables: + # Static variables + - name: directoryPattern + value: 'helm/${{ parameters.ENV }}' + - name: area + value: p4pa-analytics + - name: sourceBranch + value: $[variables['Build.SourceBranch']] + - name: buildReason + value: $[variables['Build.Reason']] + - name: pipelineName + ${{ if eq(variables['Build.Reason'], 'Manual') }}: + ${{ if ne(parameters.TRIGGER_MESSAGE, 'none') }}: + value: '[Trigger-${{ parameters.TRIGGER_MESSAGE }}] [ENV-${{ parameters.ENV }}] [ArgoCD-${{ parameters.ARGOCD_TARGET_BRANCH }}] [Postman-${{ parameters.POSTMAN_BRANCH }}]' + ${{ if eq(parameters.TRIGGER_MESSAGE, 'none') }}: + value: '[ENV-${{ parameters.ENV }}] [ArgoCD-${{ parameters.ARGOCD_TARGET_BRANCH }}] [Postman-${{ parameters.POSTMAN_BRANCH }}]' + + + - ${{ if eq(parameters.ENV, 'dev') }}: + - name: argocdServer + 
value: $(DEV_ARGOCD_SERVER) + - name: argocdUsername + value: $(DEV_ARGOCD_USERNAME) + - name: argocdPassword + value: $(DEV_ARGOCD_PASSWORD) + - name: agentPoolName + value: $(DEV_AGENT_POOL) + - name: azureServiceConnectionName + value: $(DEV_AZURE_SERVICE_CONNECTION_NAME) + - name: postman_kv_name + value: $(DEV_POSTMAN_KV_NAME) + - name: postman_env_secret_name + value: $(DEV_POSTMAN_ENV_SECRET_NAME) + + - ${{ if eq(parameters.ENV, 'uat') }}: + - name: argocdServer + value: $(UAT_ARGOCD_SERVER) + - name: argocdUsername + value: $(UAT_ARGOCD_USERNAME) + - name: argocdPassword + value: $(UAT_ARGOCD_PASSWORD) + - name: agentPoolName + value: $(UAT_AGENT_POOL) + - name: azureServiceConnectionName + value: $(UAT_AZURE_SERVICE_CONNECTION_NAME) + - name: postman_kv_name + value: $(UAT_POSTMAN_KV_NAME) + - name: postman_env_secret_name + value: $(UAT_POSTMAN_ENV_SECRET_NAME) + + - ${{ if eq(parameters.ENV, 'prod') }}: + - name: argocdServer + value: $(PROD_ARGOCD_SERVER) + - name: argocdUsername + value: $(PROD_ARGOCD_USERNAME) + - name: argocdPassword + value: $(PROD_ARGOCD_PASSWORD) + - name: agentPoolName + value: $(PROD_AGENT_POOL) + - name: azureServiceConnectionName + value: $(PROD_AZURE_SERVICE_CONNECTION_NAME) + - name: postman_kv_name + value: $(PROD_POSTMAN_KV_NAME) + - name: postman_env_secret_name + value: $(PROD_POSTMAN_ENV_SECRET_NAME) + +resources: + repositories: + - repository: templates + type: github + name: pagopa/azure-pipeline-templates + ref: refs/tags/v6.8.0 + endpoint: 'azure-devops-github-ro' + - repository: argo-templates + type: github + name: pagopa/p4pa-payhub-deploy-aks + ref: refs/tags/v1.84.0 + endpoint: 'azure-devops-github-ro' + +pool: + vmImage: ubuntu-latest + +name: $(pipelineName) + +stages: + # Debug Steps + - stage: pre_steps + condition: succeeded() + jobs: + - job: legacy_debug_info + pool: + vmImage: 'ubuntu-latest' + steps: + - bash: | + echo "build reason: ${{variables['Build.Reason']}}" + echo "agentPoolName: 
${{variables['agentPoolName']}}" + echo "argocdServer: ${{variables['argocdServer']}}" + echo "argocdTargetBranch: ${{parameters.ARGOCD_TARGET_BRANCH}}" + echo "postmanBranch: ${{parameters.POSTMAN_BRANCH}}" + echo "triggerMessage: ${{parameters.TRIGGER_MESSAGE}}" + echo "App TOPS: ${{ join(' ', parameters.APPS_TOP) }}" + echo "App MID: ${{ join(' ', parameters.APPS_MID) }}" + echo "App EXT: ${{ join(' ', parameters.APPS_EXT) }}" + displayName: 'Show Debug Information' + + # + # ArgoCD + # + - stage: sync_argo_apps + condition: succeeded() + jobs: + # Top Apps + - ${{ if and(ne(length(parameters.APPS_TOP), 0), ne(parameters.APPS_TOP, '[]')) }}: + - template: .devops/templates/deploy-argo-template.yml@argo-templates + parameters: + applicationsList: ${{ parameters.APPS_TOP }} + deploymentType: 'top' + environment: ${{ parameters.ENV }} + argocdServerUrl: $(argocdServer) + argocdUserName: $(argocdUsername) + argocdUserPassword: $(argocdPassword) + agentPoolName: $(agentPoolName) + applicationPrefix: $(area) + targetBranch: ${{ parameters.ARGOCD_TARGET_BRANCH }} + + # Mid Apps + - ${{ if and(ne(length(parameters.APPS_MID), 0), ne(parameters.APPS_MID, '[]')) }}: + - template: .devops/templates/deploy-argo-template.yml@argo-templates + parameters: + applicationsList: ${{ parameters.APPS_MID }} + deploymentType: 'mid' + environment: ${{ parameters.ENV }} + argocdServerUrl: $(argocdServer) + argocdUserName: $(argocdUsername) + argocdUserPassword: $(argocdPassword) + agentPoolName: $(agentPoolName) + applicationPrefix: $(area) + targetBranch: ${{ parameters.ARGOCD_TARGET_BRANCH }} + + # Ext Apps + - ${{ if and(ne(length(parameters.APPS_EXT), 0), ne(parameters.APPS_EXT, '[]')) }}: + - template: .devops/templates/deploy-argo-template.yml@argo-templates + parameters: + applicationsList: ${{ parameters.APPS_EXT }} + deploymentType: 'ext' + environment: ${{ parameters.ENV }} + argocdServerUrl: $(argocdServer) + argocdUserName: $(argocdUsername) + argocdUserPassword: 
$(argocdPassword) + agentPoolName: $(agentPoolName) + applicationPrefix: $(area) + targetBranch: ${{ parameters.ARGOCD_TARGET_BRANCH }} + + # Stage for Postman Tests + - stage: run_postman_tests + condition: and(succeeded(), or(${{eq(parameters.ENV, 'dev')}}, ${{eq(parameters.ENV, 'uat')}})) + dependsOn: sync_argo_apps + jobs: + # Top Apps Tests + - ${{ if and(ne(length(parameters.APPS_TOP), 0), ne(parameters.APPS_TOP, '[]')) }}: + - template: .devops/templates/postman-tests-template.yml@argo-templates + parameters: + appsList: ${{ parameters.APPS_TOP }} + appCategory: 'top' + env: ${{ parameters.ENV }} + agentPoolName: $(agentPoolName) + postmanBranch: ${{ parameters.POSTMAN_BRANCH }} + azureServiceConnection: $(azureServiceConnectionName) + keyVaultName: $(postman_kv_name) + secretName: $(postman_env_secret_name) + + # Mid Apps Tests + - ${{ if and(ne(length(parameters.APPS_MID), 0), ne(parameters.APPS_MID, '[]')) }}: + - template: .devops/templates/postman-tests-template.yml@argo-templates + parameters: + appsList: ${{ parameters.APPS_MID }} + appCategory: 'mid' + env: ${{ parameters.ENV }} + agentPoolName: $(agentPoolName) + postmanBranch: ${{ parameters.POSTMAN_BRANCH }} + azureServiceConnection: $(azureServiceConnectionName) + keyVaultName: $(postman_kv_name) + secretName: $(postman_env_secret_name) + + # Ext Apps Tests + - ${{ if and(ne(length(parameters.APPS_EXT), 0), ne(parameters.APPS_EXT, '[]')) }}: + - template: .devops/templates/postman-tests-template.yml@argo-templates + parameters: + appsList: ${{ parameters.APPS_EXT }} + appCategory: 'ext' + env: ${{ parameters.ENV }} + agentPoolName: $(agentPoolName) + postmanBranch: ${{ parameters.POSTMAN_BRANCH }} + azureServiceConnection: $(azureServiceConnectionName) + keyVaultName: $(postman_kv_name) + secretName: $(postman_env_secret_name) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..308d864 --- /dev/null +++ 
b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,36 @@ + + + + + +#### List of Changes + + + +#### Motivation and Context + + + +#### How Has This Been Tested? + + + + + +#### Screenshots (if appropriate): + +#### Types of changes + + + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) + +#### Checklist: + + + + +- [ ] My change requires a change to the documentation. +- [ ] I have updated the documentation accordingly. diff --git a/.github/terraform/.terraform.lock.hcl b/.github/terraform/.terraform.lock.hcl new file mode 100644 index 0000000..284069a --- /dev/null +++ b/.github/terraform/.terraform.lock.hcl @@ -0,0 +1,47 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/azurerm" { + version = "3.116.0" + constraints = "~> 3.108" + hashes = [ + "h1:2QbjtN4oMXzdA++Nvrj/wSmWZTPgXKOSFGGQCLEMrb4=", + "h1:BCR3NIorFSvGG3v/+JOiiw3VM4PkChLO4m84wzD9NDo=", + "zh:02b6606aff025fc2a962b3e568e000300abe959adac987183c24dac8eb057f4d", + "zh:2a23a8ce24ff9e885925ffee0c3ea7eadba7a702541d05869275778aa47bdea7", + "zh:57d10746384baeca4d5c56e88872727cdc150f437b8c5e14f0542127f7475e24", + "zh:59e3ebde1a2e1e094c671e179f231ead60684390dbf02d2b1b7fe67a228daa1a", + "zh:5f1f5c7d09efa2ee8ddf21bd9efbbf8286f6e90047556bef305c062fa0ac5880", + "zh:a40646aee3c9907276dab926e6123a8d70b1e56174836d4c59a9992034f88d70", + "zh:c21d40461bc5836cf56ad3d93d2fc47f61138574a55e972ad5ff1cb73bab66dc", + "zh:c56fb91a5ae66153ba0f737a26da1b3d4f88fdef7d41c63e06c5772d93b26953", + "zh:d1e60e85f51d12fc150aeab8e31d3f18f859c32f927f99deb5b74cb1e10087aa", + "zh:ed35e727e7d79e687cd3d148f52b442961ede286e7c5b4da1dcd9f0128009466", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + 
"zh:f6d2a4e7c58f44e7d04a4a9c73f35ed452f412c97c85def68c4b52814cbe03ab", + ] +} + +provider "registry.terraform.io/integrations/github" { + version = "6.3.0" + constraints = "~> 6.3" + hashes = [ + "h1:AG//wDT67eInhTk+SQdDz5o8R8YIIBrZGz7C9TXKDOw=", + "h1:smeAkyQqdvuOr8rtC/2+kdvWqS7YR92RWFrJL+k6z7A=", + "zh:04fe3b820fe8c247b98b9d6810b8bb84d3e8ac08054faf450c42489815ef4bfa", + "zh:24096b2d16208d1411a58bdb8df8cd9f0558fb9054ffeb95c4e7e90a9a34f976", + "zh:2b27332adf8d08fbdc08b5f55e87691bce02c311219e6deb39c08753bd93db6d", + "zh:335dd6c2d50fcdce2ef0cc194465fdf9df1f5fdecc805804c78df30a4eb2e11e", + "zh:383a6879565969dbdf5405b651cd870c09c615dbd3df2554e5574d39d161c98c", + "zh:4903038a6bc605f372e1569695db4a2e2862e1fc6cf4faf9e13c5f8f4fa2ed94", + "zh:4cc4dffbee8b28102d38abe855b7440d4f4226261b43fda2ec289b48c3de1537", + "zh:57c30c6fe0b64fa86906700ceb1691562b62f2b1ef0404952aeb4092acb6acb3", + "zh:7bf518396fb00e4f55c406f2ffb5583b43278682a92f0864a0c47e3a74627bbb", + "zh:93c2c5cb90f74ad3c0874b7f7d8a866f28a852f0eda736c6aef8ce65d4061f4d", + "zh:9562a82a6193a2db110fb34d1aceeedb27c0a640058dce9c31b37b17eeb5f4e7", + "zh:ac97f2d111703a219f27fcbf5e89460ea98f9168badcc0913c8b214a37f76814", + "zh:c882af4d33b761ec198cedac212ab1c114d97540119dc97daca38021ab3edd0a", + "zh:c9ffd0a37f07a93af02a1caa90bfbea27a952d3e5badf4aab866ec71cdb184a3", + "zh:fbd1fee2c9df3aa19cf8851ce134dea6e45ea01cb85695c1726670c285797e25", + ] +} diff --git a/.github/terraform/00_data.tf b/.github/terraform/00_data.tf new file mode 100644 index 0000000..73c81a5 --- /dev/null +++ b/.github/terraform/00_data.tf @@ -0,0 +1,11 @@ +# Secret +data "azurerm_key_vault" "key_vault" { + name = "${var.prefix}-${var.env_short}-${local.domain}-kv" + resource_group_name = "${var.prefix}-${var.env_short}-${var.location_short}-${local.domain}-sec-rg" +} + +# Github +data "github_organization_teams" "all" { + root_teams_only = true + summary_only = true +} diff --git a/.github/terraform/01_global.tf b/.github/terraform/01_global.tf new file mode 
100644 index 0000000..94a29eb --- /dev/null +++ b/.github/terraform/01_global.tf @@ -0,0 +1,45 @@ +resource "github_branch_default" "default" { + repository = local.github.repository + branch = "main" +} + +resource "github_repository_ruleset" "branch_rules" { + for_each = var.env == "prod" ? local.branch_rulesets : {} + + name = each.key + repository = local.github.repository + target = "branch" + enforcement = "active" + + conditions { + ref_name { + include = [each.value.ref_name] + exclude = [] + } + } + + dynamic "bypass_actors" { + for_each = each.value.bypass_actors == true ? toset(local.bypass_branch_rules_teams) : [] + content { + actor_id = lookup(local.team_name_to_id, bypass_actors.value) + actor_type = "Team" + bypass_mode = "always" + } + } + + rules { + creation = false + update = false + deletion = true + required_signatures = false + required_linear_history = each.value.required_linear_history + + pull_request { + dismiss_stale_reviews_on_push = false + require_last_push_approval = false + required_review_thread_resolution = false + require_code_owner_review = each.value.require_code_owner_review + required_approving_review_count = each.value.required_approving_review_count + } + } +} diff --git a/.github/terraform/03_github_environment.tf b/.github/terraform/03_github_environment.tf new file mode 100644 index 0000000..23f5f1b --- /dev/null +++ b/.github/terraform/03_github_environment.tf @@ -0,0 +1,48 @@ +################################# +# Repository Environment # +################################# +resource "github_repository_environment" "github_repository_environment" { + environment = var.env + repository = local.github.repository + # filter teams reviewers from github_organization_teams + # if reviewers_teams is null no reviewers will be configured for environment + dynamic "reviewers" { + for_each = (var.github_repository_environment.reviewers_teams == null || var.env_short != "p" ? 
[] : [1]) + content { + teams = matchkeys( + data.github_organization_teams.all.teams.*.id, + data.github_organization_teams.all.teams.*.name, + var.github_repository_environment.reviewers_teams + ) + } + } + deployment_branch_policy { + protected_branches = var.github_repository_environment.protected_branches + custom_branch_policies = var.github_repository_environment.custom_branch_policies + } +} + +################################# +# Environment Deployment Policy # +################################# + +resource "github_repository_environment_deployment_policy" "this" { + repository = local.github.repository + environment = var.env + branch_pattern = local.map_repo[var.env] + + depends_on = [ + github_repository_environment.github_repository_environment + ] +} + +############################# +# Secrets of the Repository # +############################# +resource "github_actions_secret" "repo_secrets" { + for_each = var.env_short == "p" ? local.repo_secrets : {} + + repository = local.github.repository + secret_name = each.key + plaintext_value = each.value +} diff --git a/.github/terraform/99_locals.tf b/.github/terraform/99_locals.tf new file mode 100644 index 0000000..b2d16df --- /dev/null +++ b/.github/terraform/99_locals.tf @@ -0,0 +1,35 @@ +locals { + # Repo + github = { + org = "pagopa" + repository = "p4pa-analytics-deploy-aks" + } + domain = "payhub" + + repo_secrets = var.env_short == "p" ? 
{ + } : {} + + map_repo = { + "dev" : "*", + "uat" : "uat" + "prod" : "main" + } + + bypass_branch_rules_teams = ["p4pa-admins", "payments-cloud-admin"] + + # this is use to lookup the id for each team + team_name_to_id = { + for team in data.github_organization_teams.all.teams : + team.name => team.id + } + + branch_rulesets = { + main = { + ref_name = "refs/heads/main" + bypass_actors = true + required_linear_history = true + require_code_owner_review = true + required_approving_review_count = 1 + }, + } +} diff --git a/.github/terraform/99_main.tf b/.github/terraform/99_main.tf new file mode 100644 index 0000000..737db66 --- /dev/null +++ b/.github/terraform/99_main.tf @@ -0,0 +1,24 @@ +terraform { + required_version = ">= 1.9" + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 3.108" + } + github = { + source = "integrations/github" + version = "~> 6.3" + } + } + + backend "azurerm" {} +} + +provider "azurerm" { + features {} +} + +provider "github" { + owner = "pagopa" +} diff --git a/.github/terraform/99_variables.tf b/.github/terraform/99_variables.tf new file mode 100644 index 0000000..c86a82c --- /dev/null +++ b/.github/terraform/99_variables.tf @@ -0,0 +1,43 @@ +variable "prefix" { + type = string + validation { + condition = ( + length(var.prefix) <= 6 + ) + error_message = "Max length is 6 chars." + } +} + +variable "env" { + type = string + description = "Environment" +} + +variable "env_short" { + type = string + validation { + condition = ( + length(var.env_short) <= 1 + ) + error_message = "Max length is 1 chars." + } +} + +variable "location_short" { + type = string + description = "Location short like eg: neu, weu.." 
+} + +variable "github_repository_environment" { + type = object({ + protected_branches = bool + custom_branch_policies = bool + reviewers_teams = list(string) + }) + description = "GitHub Continuous Integration roles" + default = { + protected_branches = false + custom_branch_policies = true + reviewers_teams = ["p4pa-admins"] + } +} diff --git a/.github/terraform/env/dev/backend.ini b/.github/terraform/env/dev/backend.ini new file mode 100644 index 0000000..23b7e40 --- /dev/null +++ b/.github/terraform/env/dev/backend.ini @@ -0,0 +1 @@ +subscription=DEV-p4pa diff --git a/.github/terraform/env/dev/backend.tfvars b/.github/terraform/env/dev/backend.tfvars new file mode 100644 index 0000000..ab7af78 --- /dev/null +++ b/.github/terraform/env/dev/backend.tfvars @@ -0,0 +1,4 @@ +resource_group_name = "terraform-state-rg" +storage_account_name = "tfappdevp4pa" +container_name = "terraform-state" +key = "p4pa-analytics-deploy-aks-dev.tfstate" diff --git a/.github/terraform/env/dev/terraform.tfvars b/.github/terraform/env/dev/terraform.tfvars new file mode 100644 index 0000000..a72bf10 --- /dev/null +++ b/.github/terraform/env/dev/terraform.tfvars @@ -0,0 +1,12 @@ +prefix = "p4pa" +env = "dev" +env_short = "d" +location_short = "itn" + +tags = { + CreatedBy = "Terraform" + Environment = "DEV" + Owner = "P4PA" + Source = "https://github.com/pagopa/p4pa-analytics-deploy-aks" + CostCenter = "TS310 - PAGAMENTI & SERVIZI" +} diff --git a/.github/terraform/env/prod/backend.ini b/.github/terraform/env/prod/backend.ini new file mode 100644 index 0000000..ca90612 --- /dev/null +++ b/.github/terraform/env/prod/backend.ini @@ -0,0 +1 @@ +subscription=PROD-p4pa diff --git a/.github/terraform/env/prod/backend.tfvars b/.github/terraform/env/prod/backend.tfvars new file mode 100644 index 0000000..0a301b9 --- /dev/null +++ b/.github/terraform/env/prod/backend.tfvars @@ -0,0 +1,4 @@ +resource_group_name = "terraform-state-rg" +storage_account_name = "tfappprodp4pa" +container_name = 
"terraform-state" +key = "p4pa-analytics-deploy-aks-prod.tfstate" diff --git a/.github/terraform/env/prod/terraform.tfvars b/.github/terraform/env/prod/terraform.tfvars new file mode 100644 index 0000000..701d0a3 --- /dev/null +++ b/.github/terraform/env/prod/terraform.tfvars @@ -0,0 +1,12 @@ +prefix = "p4pa" +env = "prod" +env_short = "p" +location_short = "itn" + +tags = { + CreatedBy = "Terraform" + Environment = "PROD" + Owner = "P4PA" + Source = "https://github.com/pagopa/p4pa-analytics-deploy-aks" + CostCenter = "TS310 - PAGAMENTI & SERVIZI" +} diff --git a/.github/terraform/env/uat/backend.ini b/.github/terraform/env/uat/backend.ini new file mode 100644 index 0000000..c09a33c --- /dev/null +++ b/.github/terraform/env/uat/backend.ini @@ -0,0 +1 @@ +subscription=UAT-p4pa diff --git a/.github/terraform/env/uat/backend.tfvars b/.github/terraform/env/uat/backend.tfvars new file mode 100644 index 0000000..041b867 --- /dev/null +++ b/.github/terraform/env/uat/backend.tfvars @@ -0,0 +1,4 @@ +resource_group_name = "terraform-state-rg" +storage_account_name = "tfappuatp4pa" +container_name = "terraform-state" +key = "p4pa-analytics-deploy-aks-uat.tfstate" diff --git a/.github/terraform/env/uat/terraform.tfvars b/.github/terraform/env/uat/terraform.tfvars new file mode 100644 index 0000000..b1811df --- /dev/null +++ b/.github/terraform/env/uat/terraform.tfvars @@ -0,0 +1,12 @@ +prefix = "p4pa" +env = "uat" +env_short = "u" +location_short = "itn" + +tags = { + CreatedBy = "Terraform" + Environment = "UAT" + Owner = "P4PA" + Source = "https://github.com/pagopa/p4pa-analytics-deploy-aks" + CostCenter = "TS310 - PAGAMENTI & SERVIZI" +} diff --git a/.github/terraform/terraform.sh b/.github/terraform/terraform.sh new file mode 100755 index 0000000..047a751 --- /dev/null +++ b/.github/terraform/terraform.sh @@ -0,0 +1,324 @@ +#!/bin/bash +############################################################ +# Terraform script for managing infrastructure on Azure +# Fingerprint: 
d2hhdHlvdXdhbnQ/Cg== +############################################################ +# Global variables +# Version format x.y accepted +vers="1.11" +script_name=$(basename "$0") +git_repo="https://raw.githubusercontent.com/pagopa/eng-common-scripts/main/azure/${script_name}" +tmp_file="${script_name}.new" +# Check if the third parameter exists and is a file +if [ -n "$3" ] && [ -f "$3" ]; then + FILE_ACTION=true +else + FILE_ACTION=false +fi + +# Define functions +function clean_environment() { + rm -rf .terraform + rm tfplan 2>/dev/null + echo "cleaned!" +} + +function download_tool() { + #default value + cpu_type="intel" + os_type=$(uname) + + # only on MacOS + if [ "$os_type" == "Darwin" ]; then + cpu_brand=$(sysctl -n machdep.cpu.brand_string) + if grep -q -i "intel" <<< "$cpu_brand"; then + cpu_type="intel" + else + cpu_type="arm" + fi + fi + + echo $cpu_type + tool=$1 + git_repo="https://raw.githubusercontent.com/pagopa/eng-common-scripts/main/golang/${tool}_${cpu_type}" + if ! command -v $tool &> /dev/null; then + if ! curl -sL "$git_repo" -o "$tool"; then + echo "Error downloading ${tool}" + return 1 + else + chmod +x $tool + echo "${tool} downloaded! Please note this tool WON'T be copied in your **/bin folder for safety reasons. +You need to do it yourself!" + read -p "Press enter to continue" + + + fi + fi +} + +function extract_resources() { + TF_FILE=$1 + ENV=$2 + TARGETS="" + + # Check if the file exists + if [ ! -f "$TF_FILE" ]; then + echo "File $TF_FILE does not exist." + exit 1 + fi + + # Check if the directory exists + if [ ! -d "./env/$ENV" ]; then + echo "Directory ./env/$ENV does not exist." 
+ exit 1 + fi + + TMP_FILE=$(mktemp) + grep -E '^resource|^module' $TF_FILE > $TMP_FILE + + while read -r line ; do + TYPE=$(echo $line | cut -d '"' -f 1 | tr -d ' ') + if [ "$TYPE" == "module" ]; then + NAME=$(echo $line | cut -d '"' -f 2) + TARGETS+=" -target=\"$TYPE.$NAME\"" + else + NAME1=$(echo $line | cut -d '"' -f 2) + NAME2=$(echo $line | cut -d '"' -f 4) + TARGETS+=" -target=\"$NAME1.$NAME2\"" + fi + done < $TMP_FILE + + rm $TMP_FILE + + echo "./terraform.sh $action $ENV $TARGETS" +} + +function help_usage() { + echo "terraform.sh Version ${vers}" + echo + echo "Usage: ./script.sh [ACTION] [ENV] [OTHER OPTIONS]" + echo "es. ACTION: init, apply, plan, etc." + echo "es. ENV: dev, uat, prod, etc." + echo + echo "Available actions:" + echo " clean Remove .terraform* folders and tfplan files" + echo " help This help" + echo " list List every environment available" + echo " update Update this script if possible" + echo " summ Generate summary of Terraform plan" + echo " tflist Generate an improved output of terraform state list" + echo " tlock Generate or update the dependency lock file" + echo " * any terraform option" +} + +function init_terraform() { + if [ -n "$env" ]; then + terraform init -reconfigure -backend-config="./env/$env/backend.tfvars" + else + echo "ERROR: no env configured!" + exit 1 + fi +} + +function list_env() { + # Check if env directory exists + if [ ! 
-d "./env" ]; then + echo "No environment directory found" + exit 1 + fi + + # List subdirectories under env directory + env_list=$(ls -d ./env/*/ 2>/dev/null) + + # Check if there are any subdirectories + if [ -z "$env_list" ]; then + echo "No environments found" + exit 1 + fi + + # Print the list of environments + echo "Available environments:" + for env in $env_list; do + env_name=$(echo "$env" | sed 's#./env/##;s#/##') + echo "- $env_name" + done +} + +function other_actions() { + if [ -n "$env" ] && [ -n "$action" ]; then + terraform "$action" -var-file="./env/$env/terraform.tfvars" -compact-warnings $other + else + echo "ERROR: no env or action configured!" + exit 1 + fi +} + +function state_output_taint_actions() { + if [ "$action" == "tflist" ]; then + # If 'tflist' is not installed globally and there is no 'tflist' file in the current directory, + # attempt to download the 'tflist' tool + if ! command -v tflist &> /dev/null && [ ! -f "tflist" ]; then + download_tool "tflist" + if [ $? -ne 0 ]; then + echo "Error: Failed to download tflist!!" + exit 1 + else + echo "tflist downloaded!" 
+ fi + fi + if command -v tflist &> /dev/null; then + terraform state list | tflist + else + terraform state list | ./tflist + fi + else + terraform $action $other + fi +} + + +function parse_tfplan_option() { + # Create an array to contain arguments that do not start with '-tfplan=' + local other_args=() + + # Loop over all arguments + for arg in "$@"; do + # If the argument starts with '-tfplan=', extract the file name + if [[ "$arg" =~ ^-tfplan= ]]; then + echo "${arg#*=}" + else + # If the argument does not start with '-tfplan=', add it to the other_args array + other_args+=("$arg") + fi + done + + # Print all arguments in other_args separated by spaces + echo "${other_args[@]}" +} + +function tfsummary() { + local plan_file + plan_file=$(parse_tfplan_option "$@") + if [ -z "$plan_file" ]; then + plan_file="tfplan" + fi + action="plan" + other="-out=${plan_file}" + other_actions + if [ -n "$(command -v tf-summarize)" ]; then + tf-summarize -tree "${plan_file}" + else + echo "tf-summarize is not installed" + fi + if [ "$plan_file" == "tfplan" ]; then + rm $plan_file + fi +} + +function update_script() { + # Check if the repository was cloned successfully + if ! curl -sL "$git_repo" -o "$tmp_file"; then + echo "Error cloning the repository" + rm "$tmp_file" 2>/dev/null + return 1 + fi + + # Check if a newer version exists + remote_vers=$(sed -n '8s/vers="\(.*\)"/\1/p' "$tmp_file") + if [ "$(printf '%s\n' "$vers" "$remote_vers" | sort -V | tail -n 1)" == "$vers" ]; then + echo "The local script version is equal to or newer than the remote version." + rm "$tmp_file" 2>/dev/null + return 0 + fi + + # Check the fingerprint + local_fingerprint=$(sed -n '4p' "$0") + remote_fingerprint=$(sed -n '4p' "$tmp_file") + + if [ "$local_fingerprint" != "$remote_fingerprint" ]; then + echo "The local and remote file fingerprints do not match." 
+ rm "$tmp_file" 2>/dev/null + return 0 + fi + + # Show the current and available versions to the user + echo "Current script version: $vers" + echo "Available script version: $remote_vers" + + # Ask the user if they want to update the script + read -rp "Do you want to update the script to version $remote_vers? (y/n): " answer + + if [ "$answer" == "y" ] || [ "$answer" == "Y" ]; then + # Replace the local script with the updated version + cp "$tmp_file" "$script_name" + chmod +x "$script_name" + rm "$tmp_file" 2>/dev/null + + echo "Script successfully updated to version $remote_vers" + else + echo "Update canceled by the user" + fi + + rm "$tmp_file" 2>/dev/null +} + +# Check arguments number +if [ "$#" -lt 1 ]; then + help_usage + exit 0 +fi + +# Parse arguments +action=$1 +env=$2 +filetf=$3 +shift 2 +other=$@ + +if [ -n "$env" ]; then + # shellcheck source=/dev/null + source "./env/$env/backend.ini" + if [ -z "$(command -v az)" ]; then + echo "az not found, cannot proceed" + exit 1 + fi + az account set -s "${subscription}" +fi + +# Call appropriate function based on action +case $action in + clean) + clean_environment + ;; + ?|help|-h) + help_usage + ;; + init) + init_terraform "$other" + ;; + list) + list_env + ;; + output|state|taint|tflist) + init_terraform + state_output_taint_actions $other + ;; + summ) + init_terraform + tfsummary "$other" + ;; + tlock) + terraform providers lock -platform=windows_amd64 -platform=darwin_amd64 -platform=darwin_arm64 -platform=linux_amd64 + ;; + update) + update_script + ;; + *) + if [ "$FILE_ACTION" = true ]; then + extract_resources "$filetf" "$env" + else + init_terraform + other_actions "$other" + fi + ;; +esac diff --git a/.github/workflows/helm-test.yml b/.github/workflows/helm-test.yml new file mode 100644 index 0000000..6aefe30 --- /dev/null +++ b/.github/workflows/helm-test.yml @@ -0,0 +1,50 @@ +name: Helm Chart Testing + +on: + pull_request: + paths: + - 'helm/**' + - 'ct.yaml' + push: + branches: + - main + paths: 
+ - 'helm/**' + - 'ct.yaml' + +jobs: + helm-lint-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + # v4.1.1 - https://github.com/actions/checkout/releases/tag/v4.1.1 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 + with: + fetch-depth: 0 + + - name: Set up Helm + # v4.2.0 - https://github.com/Azure/setup-helm/releases/tag/v4.2.0 + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 + with: + version: 'latest' + + - name: Add helm repositories + run: | + helm repo add pagopa-microservice https://pagopa.github.io/aks-microservice-chart-blueprint + helm repo update + + - name: Set up chart-testing + # v2.6.1 - https://github.com/helm/chart-testing-action/releases/tag/v2.6.0 + uses: helm/chart-testing-action@b43128a8b25298e1e7b043b78ea6613844e079b1 + + - name: List changed charts + id: list-changed + run: | + changed=$(ct list-changed --config ct.yaml) + if [[ -n "$changed" ]]; then + echo "changed=true" >> $GITHUB_OUTPUT + echo "Changed charts: $changed" + fi + + - name: Run chart-testing (lint) + run: ct lint --config ct.yaml --all --lint-conf ct.yaml diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml new file mode 100644 index 0000000..215b172 --- /dev/null +++ b/.github/workflows/pr-title.yml @@ -0,0 +1,56 @@ +name: Validate PR title + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + main: + name: Validate PR title + runs-on: ubuntu-22.04 + steps: + # Please look up the latest version from + # https://github.com/amannn/action-semantic-pull-request/releases + # from https://github.com/amannn/action-semantic-pull-request/commits/main + - uses: amannn/action-semantic-pull-request@e9fabac35e210fea40ca5b14c0da95a099eff26f # v5.4.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + # Configure which types are allowed. 
+ # Default: https://github.com/commitizen/conventional-commit-types + types: | + fix + feat + docs + chore + breaking + # Configure that a scope must always be provided. + requireScope: false + # Configure additional validation for the subject based on a regex. + # This example ensures the subject starts with an uppercase character. + subjectPattern: '.+' + # If `subjectPattern` is configured, you can use this property to override + # the default error message that is shown when the pattern doesn't match. + # The variables `subject` and `title` can be used within the message. + subjectPatternError: | + The subject "{subject}" found in the pull request title "{title}" + didn't match the configured pattern. Please ensure that the subject + starts with an uppercase character. Ex. (chore: [P4PU-000] ) + # For work-in-progress PRs you can typically use draft pull requests + # from Github. However, private repositories on the free plan don't have + # this option and therefore this action allows you to opt-in to using the + # special "[WIP]" prefix to indicate this state. This will avoid the + # validation of the PR title and the pull request checks remain pending. + # Note that a second check will be reported if this is enabled. + wip: true + # When using "Squash and merge" on a PR with only one commit, GitHub + # will suggest using that commit message instead of the PR title for the + # merge commit, and it's easy to commit this by mistake. Enable this option + # to also validate the commit message for one commit PRs. + validateSingleCommit: false + # Related to `validateSingleCommit` you can opt-in to validate that the PR + # title matches a single commit to avoid confusion. 
+ validateSingleCommitMatchesPrTitle: false diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..124f49f --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,24 @@ +name: Release + +on: + # Trigger the workflow on push or pull request, + # but only for the main branch + push: + branches: + - main + paths-ignore: + - "CODEOWNERS" + - "**.md" + - ".**" + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Release action + id: release + # https://github.com/pagopa/eng-github-actions-iac-template/releases/tag/v1.20.0 + uses: pagopa/eng-github-actions-iac-template/global/release-action@5c7e5f690ad0f07f3bd945bdebf2a6c7a575b33f + with: + github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e59560f --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +.terraform +.idea +*.iml +**/charts +**/Chart.lock diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..54067f3 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,61 @@ +repos: + ## general + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + # Common errors + - id: end-of-file-fixer + exclude_types: [sql] + exclude: mypivot4-batch + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + exclude_types: [sql] + exclude: mypivot4-batch + - id: check-yaml + exclude: mypivot4-batch + - id: check-executables-have-shebangs + exclude: mypivot4-batch + # Cross platform + - id: check-case-conflict + exclude: mypivot4-batch + - id: mixed-line-ending + args: [--fix=lf] + exclude_types: [sql] + exclude: mypivot4-batch + # Security + - id: detect-aws-credentials + args: ['--allow-missing-credentials'] + exclude: mypivot4-batch + - id: detect-private-key + ## terraform + - repo: https://github.com/antonbabenko/pre-commit-terraform + rev: v1.96.1 + hooks: + - id: terraform_fmt + - id: terraform_docs + args: 
+ - --hook-config=--path-to-file=README.md # Valid UNIX path. I.e. ../TFDOC.md or docs/README.md etc. + - --hook-config=--add-to-existing-file=true # Boolean. true or false + - --hook-config=--create-file-if-not-exist=true # Boolean. true or false + - --args=--hide providers + # - id: terraform_tfsec + - id: terraform_validate + args: + - --init-args=-lockfile=readonly + - --args=-json + - --args=-no-color + - --args=-compact-warnings + # - id: terraform_providers_lock + # args: + # - --args=-platform=windows_amd64 + # - --args=-platform=darwin_amd64 + # - --args=-platform=darwin_arm64 + # - --args=-platform=linux_amd64 + # - --args=-platform=linux_arm64 + - repo: local + hooks: + - id: helm-ct-lint + name: Helm Chart Testing Lint + entry: bash -c "ct lint --config ct.yaml --all --lint-conf ct.yaml" + language: system + pass_filenames: false diff --git a/.releaserc.json b/.releaserc.json new file mode 100644 index 0000000..57339cb --- /dev/null +++ b/.releaserc.json @@ -0,0 +1,19 @@ +{ + "plugins": [ + [ + "@semantic-release/commit-analyzer", + { + "preset": "angular", + "releaseRules": [{ "type": "breaking", "release": "major" }] + } + ], + "@semantic-release/release-notes-generator", + [ + "@semantic-release/github", + { + "successComment": false, + "failComment": false + } + ] + ] +} diff --git a/.terraform-version b/.terraform-version new file mode 100644 index 0000000..7bc1c40 --- /dev/null +++ b/.terraform-version @@ -0,0 +1 @@ +1.9.6 diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..5a356f1 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,6 @@ +# .github/CODEOWNERS + +# groups +.github/ @pagopa/payments-cloud-admin @pagopa/p4pa-admins + +helm/prod @pagopa/payments-cloud-admin @pagopa/p4pa-admins diff --git a/README.md b/README.md index b2ca229..2b61031 100644 --- a/README.md +++ b/README.md @@ -1 +1,10 @@ # p4pa-analytics-deploy-aks + +Helm chart to deploy p4pa-analytics into AKS + +# How to add a new app + +These are the steps needed to add 
a new app: + +- insert a new folder inside the `helm//` folder (e.g. `helm/dev/top/p4pa-superset`) (for the category it is important to see the confluence page that explains how to choose) +- insert a new file with the same name as the app inside the `helm/_global` folder it does not matter if it is not used diff --git a/ct.yaml b/ct.yaml new file mode 100644 index 0000000..d314c11 --- /dev/null +++ b/ct.yaml @@ -0,0 +1,96 @@ +# Basic Chart Testing Configuration +remote: origin +target-branch: main +timeout: 600s + +# Chart directories to scan for changes recursively +chart-dirs: + - helm/_global + - helm/dev/ext + - helm/dev/mid + - helm/dev/top + - helm/uat/ext + - helm/uat/mid + - helm/uat/top + - helm/prod/ext + - helm/prod/mid + - helm/prod/top + +# Required Helm repositories for chart dependencies +chart-repos: + - pagopa-microservice=https://pagopa.github.io/aks-microservice-chart-blueprint + +# General validation settings +validate-maintainers: false +validate-chart-schema: true +validate-yaml: true +check-version-increment: true +debug: false + +# ===================================================== +# Lint and Validation Rules +# ===================================================== +lint: + yamllint: + config: | + extends: default + rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 0 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + comments: disable + comments-indentation: disable + document-end: disable + document-start: disable + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + hyphens: + max-spaces-after: 1 + indentation: + spaces: consistent + indent-sequences: true + check-multi-line-strings: false + key-duplicates: enable + line-length: disable + new-line-at-end-of-file: enable + new-lines: enable + trailing-spaces: enable + truthy: disable + +# Regole di validazione per Chart.yaml e values.yaml possono essere 
gestite +# attraverso la configurazione di schema validation di Helm +schema: + values: + - name: image.tag + required: true + type: string + - name: replicaCount + required: true + type: integer + + chart: + - name: version + required: true + type: string + - name: appVersion + required: true + type: string + - name: dependencies + required: true + type: array + + template: + - name: metadata.labels + required: true + type: object diff --git a/force-release b/force-release new file mode 100644 index 0000000..1ca3108 --- /dev/null +++ b/force-release @@ -0,0 +1 @@ +0014 diff --git a/helm/_global/p4pa-superset.yaml b/helm/_global/p4pa-superset.yaml new file mode 100644 index 0000000..0145ee1 --- /dev/null +++ b/helm/_global/p4pa-superset.yaml @@ -0,0 +1,583 @@ +superset: + # -- User ID directive. This user must have enough permissions to run the bootstrap script (for PROD env we should build a new image which would install the required packages) + runAsUser: 0 # 65534 cannot use because we need to install some packages first + + # -- Specify rather or not helm should create the secret described in `secret-env.yaml` template + secretEnv: + # -- Change to false in order to support externally created secret (Binami "Sealed Secrets" for Kubernetes or External Secrets Operator) + # note: when externally creating the secret, the chart still expects to pull values from a secret with the name of the release defaults to `release-name-superset-env` - full logic located in _helpers.tpl file: `define "superset.fullname"` + create: false + + # -- Specify service account name to be used + serviceAccountName: analytics-workload-identity + serviceAccount: + create: false + + # -- Install additional packages and do any other bootstrap configuration in this script + # For production clusters it's recommended to build own image with this step done in CI + # @default -- see `values.yaml` + bootstrapScript: | + #!/bin/bash + apt update && apt install -y gcc libpq-dev python3-dev 
pkg-config + uv pip install psycopg2-binary + if [ ! -f ~/bootstrap ]; then echo "Running Superset with uid {{ .Values.runAsUser }}" > ~/bootstrap; fi + + # -- Extra environment variables that will be passed into pods + extraEnv: + SUPERSET_APP_ROOT: "/analytics" # if you change this, apply the same on probe configurations + # Different gunicorn settings, refer to the gunicorn documentation + # https://docs.gunicorn.org/en/stable/settings.html# + # These variables are used as Flags at the gunicorn startup + # https://github.com/apache/superset/blob/master/docker/run-server.sh#L22 + # Extend timeout to allow long running queries. + # GUNICORN_TIMEOUT: 300 + # Increase the gunicorn worker amount, can improve performance drastically + # See: https://docs.gunicorn.org/en/stable/design.html#how-many-workers + # SERVER_WORKER_AMOUNT: 4 + # WORKER_MAX_REQUESTS: 0 + # WORKER_MAX_REQUESTS_JITTER: 0 + # SERVER_THREADS_AMOUNT: 20 + # GUNICORN_KEEPALIVE: 2 + # SERVER_LIMIT_REQUEST_LINE: 0 + # SERVER_LIMIT_REQUEST_FIELD_SIZE: 0 + + # OAUTH_HOME_DOMAIN: .. + # # If a whitelist is not set, any address that can use your OAuth2 endpoint will be able to login. + # # this includes any random Gmail address if your OAuth2 Web App is set to External. + # OAUTH_WHITELIST_REGEX: ... + + # -- Extra environment variables in RAW format that will be passed into pods + extraEnvRaw: [] + # Load DB password from other secret (e.g. 
for zalando operator) + # - name: DB_PASS + # valueFrom: + # secretKeyRef: + # name: superset.superset-postgres.credentials.postgresql.acid.zalan.do + # key: password + + + # -- Extra files to be mounted as ConfigMap on the path specified in `extraConfigMountPath` + extraConfigs: {} + # import_datasources.yaml: | + # databases: + # - allow_file_upload: true + # allow_ctas: true + # allow_cvas: true + # database_name: example-db + # extra: "{\r\n \"metadata_params\": {},\r\n \"engine_params\": {},\r\n \"\ + # metadata_cache_timeout\": {},\r\n \"schemas_allowed_for_file_upload\": []\r\n\ + # }" + # sqlalchemy_uri: example://example-db.local + # tables: [] + + extraVolumes: [] + # - name: customConfig + # configMap: + # name: '{{ template "superset.fullname" . }}-custom-config' + # - name: additionalSecret + # secret: + # secretName: my-secret + # defaultMode: 0600 + + extraVolumeMounts: [] + # - name: customConfig + # mountPath: /mnt/config + # readOnly: true + # - name: additionalSecret: + # mountPath: /mnt/secret + + # -- A dictionary of overrides to append at the end of superset_config.py - the name does not matter + # WARNING: the order is not guaranteed + # Files can be passed as helm --set-file configOverrides.my-override=my-file.py + configOverrides: + # extend_timeout: | + # # Extend timeout to allow long running queries. + # SUPERSET_WEBSERVER_TIMEOUT = ... 
+ # enable_oauth: | + # from flask_appbuilder.security.manager import (AUTH_DB, AUTH_OAUTH) + # AUTH_TYPE = AUTH_OAUTH + # OAUTH_PROVIDERS = [ + # { + # "name": "google", + # "whitelist": [ os.getenv("OAUTH_WHITELIST_REGEX", "") ], + # "icon": "fa-google", + # "token_key": "access_token", + # "remote_app": { + # "client_id": os.environ.get("GOOGLE_KEY"), + # "client_secret": os.environ.get("GOOGLE_SECRET"), + # "api_base_url": "https://www.googleapis.com/oauth2/v2/", + # "client_kwargs": {"scope": "email profile"}, + # "request_token_url": None, + # "access_token_url": "https://accounts.google.com/o/oauth2/token", + # "authorize_url": "https://accounts.google.com/o/oauth2/auth", + # "authorize_params": {"hd": os.getenv("OAUTH_HOME_DOMAIN", "")} + # } + # } + # ] + # # Map Authlib roles to superset roles + # AUTH_ROLE_ADMIN = 'Admin' + # AUTH_ROLE_PUBLIC = 'Public' + # # Will allow user self registration, allowing to create Flask users from Authorized User + # AUTH_USER_REGISTRATION = True + # # The default user self registration role + # AUTH_USER_REGISTRATION_ROLE = "Admin" + + # -- Same as above but the values are files + configOverridesFiles: {} + # extend_timeout: extend_timeout.py + # enable_oauth: enable_oauth.py + + configMountPath: "/app/pythonpath" + + extraConfigMountPath: "/app/configs" + + image: + repository: apachesuperset.docker.scarf.sh/apache/superset + tag: 6.0.0rc2 # ~ using RC version in order to be able to use a context path different from / (SUPERSET_APP_ROOT env var) + pullPolicy: IfNotPresent + + imagePullSecrets: [] + + initImage: + repository: apache/superset + tag: dockerize + pullPolicy: IfNotPresent + + ingress: + enabled: true + ingressClassName: nginx + annotations: + # kubernetes.io/tls-acme: "true" + nginx.ingress.kubernetes.io/rewrite-target: /analytics$1 + nginx.ingress.kubernetes.io/use-regex: 'true' + ## Extend timeout to allow long running queries. 
+ nginx.ingress.kubernetes.io/proxy-connect-timeout: "300" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + path: /analytics(/?.*) + pathType: ImplementationSpecific + extraHostsRaw: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # Superset node configuration + supersetNode: + replicas: + enabled: true + + # -- Startup command + # @default -- See `values.yaml` + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; /usr/bin/run-server.sh" + connections: + # -- Change in case of bringing your own redis and then also set redis.enabled:false + redis_host: "READ-FROM-ENV" + redis_port: "READ-FROM-ENV" + redis_user: "READ-FROM-ENV" + redis_password: "READ-FROM-ENV" + redis_cache_db: "1" + redis_celery_db: "0" + # Or SSL port is usually 6380 + # Update following for using Redis with SSL + redis_ssl: + enabled: true + # You need to change below configuration incase bringing own PostgresSQL instance and also set postgresql.enabled:false + # -- Database type for Superset metadata (Supported types: "postgresql", "mysql") + db_type: "postgresql" + db_host: "READ-FROM-ENV" + db_port: "READ-FROM-ENV" + db_user: "READ-FROM-ENV" + db_pass: "READ-FROM-ENV" + db_name: "READ-FROM-ENV" + + env: {} + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- Init containers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . 
}}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s + + # -- Launch additional containers into supersetNode pod + extraContainers: [] + # -- Annotations to be added to supersetNode deployment + deploymentAnnotations: {} + # -- Labels to be added to supersetNode deployment + deploymentLabels: {} + # -- Affinity to be added to supersetNode deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetNode deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetNode pods + podAnnotations: {} + # -- Labels to be added to supersetNode pods + podLabels: {} + startupProbe: + httpGet: + path: /analytics/health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /analytics/health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 15 + successThreshold: 1 + readinessProbe: + httpGet: + path: /analytics/health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 15 + successThreshold: 1 + podSecurityContext: + seccompProfile: + type: RuntimeDefault + containerSecurityContext: {} # cannot limit user privileges due to bootstrap needs (see comment at row 2) + # allowPrivilegeEscalation: false + # runAsNonRoot: true + # runAsUser: 65534 + # runAsGroup: 65534 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + + # Superset Celery worker configuration + supersetWorker: + replicas: + enabled: true + # -- Worker startup command + # @default -- a `celery worker` command + command: + - "/bin/sh" + - "-c" + - ". 
{{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker" + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- Init container + # @default -- a container waiting for postgres and redis + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Launch additional containers into supersetWorker pod + extraContainers: [] + # -- Annotations to be added to supersetWorker deployment + deploymentAnnotations: {} + # -- Labels to be added to supersetWorker deployment + deploymentLabels: {} + # -- Affinity to be added to supersetWorker deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWorker deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetWorker pods + podAnnotations: {} + # -- Labels to be added to supersetWorker pods + podLabels: {} + podSecurityContext: + seccompProfile: + type: RuntimeDefault + containerSecurityContext: {} # cannot limit user privileges due to bootstrap needs (see comment at row 2) + # allowPrivilegeEscalation: false + # runAsNonRoot: true + # runAsUser: 65534 + # runAsGroup: 65534 + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + + # Superset beat configuration (to trigger scheduled jobs like reports) + supersetCeleryBeat: + # -- This is only required if you intend to use alerts and reports + enabled: false + # -- Command + # @default -- a `celery beat` command + command: + - "/bin/sh" + - "-c" + - ". 
{{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid --schedule /tmp/celerybeat-schedule" + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- List of init containers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Launch additional containers into supersetCeleryBeat pods + extraContainers: [] + # -- Annotations to be added to supersetCeleryBeat deployment + deploymentAnnotations: {} + # -- Affinity to be added to supersetCeleryBeat deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetCeleryBeat deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetCeleryBeat pods + podAnnotations: {} + # -- Labels to be added to supersetCeleryBeat pods + podLabels: {} + podSecurityContext: + seccompProfile: + type: RuntimeDefault + containerSecurityContext: {} # cannot limit user privileges due to bootstrap needs (see comment at row 2) + # allowPrivilegeEscalation: false + # runAsNonRoot: true + # runAsUser: 65534 + # runAsGroup: 65534 + + supersetCeleryFlower: + # -- Enables a Celery flower deployment (management UI to monitor celery jobs) + # WARNING: on superset 1.x, this requires a Superset image that has `flower<1.0.0` installed (which is NOT the case of the default images) + # flower>=1.0.0 requires Celery 5+ which Superset 1.5 does not support + enabled: false + # -- Command + # @default -- a `celery flower` command + command: + - "/bin/sh" + - "-c" + - "celery --app=superset.tasks.celery_app:app flower" + # -- List of init 
containers + # @default -- a container waiting for postgres and redis + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Launch additional containers into supersetCeleryFlower pods + extraContainers: [] + # -- Annotations to be added to supersetCeleryFlower deployment + deploymentAnnotations: {} + # -- Affinity to be added to supersetCeleryFlower deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetCeleryFlower deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetCeleryFlower pods + podAnnotations: {} + # -- Labels to be added to supersetCeleryFlower pods + podLabels: {} + podSecurityContext: + seccompProfile: + type: RuntimeDefault + containerSecurityContext: {} # cannot limit user privileges due to bootstrap needs (see comment at row 2) + # allowPrivilegeEscalation: false + # runAsNonRoot: true + # runAsUser: 65534 + # runAsGroup: 65534 + + supersetWebsockets: + # -- This is only required if you intend to use `GLOBAL_ASYNC_QUERIES` in `ws` mode + # see https://github.com/apache/superset/blob/master/CONTRIBUTING.md#async-chart-queries + enabled: false + replicaCount: 1 + image: + # -- There is no official image (yet), this one is community-supported + repository: oneacrefund/superset-websocket + tag: latest + pullPolicy: IfNotPresent + # -- The config.json to pass to the server, see https://github.com/apache/superset/tree/master/superset-websocket + # Note that the configuration can also read from environment variables (which will have priority), see https://github.com/apache/superset/blob/master/superset-websocket/src/config.ts for a list of supported variables + # @default 
-- see `values.yaml` + config: + { + "port": 8080, + "logLevel": "debug", + "logToFile": false, + "logFilename": "app.log", + "statsd": { "host": "127.0.0.1", "port": 8125, "globalTags": [] }, + "redis": + { + "port": 6379, + "host": "127.0.0.1", + "password": "", + "db": 0, + "ssl": false, + }, + "redisStreamPrefix": "async-events-", + "jwtSecret": "CHANGE-ME", + "jwtCookieName": "async-token", + } + service: + type: ClusterIP + annotations: {} + loadBalancerIP: ~ + port: 8080 + nodePort: + # -- (int) + http: nil + command: [] + # -- Launch additional containers into supersetWebsockets pods + extraContainers: [] + deploymentAnnotations: {} + # -- Affinity to be added to supersetWebsockets deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWebsockets deployments + topologySpreadConstraints: [] + podAnnotations: {} + podLabels: {} + strategy: {} + podSecurityContext: + seccompProfile: + type: RuntimeDefault + containerSecurityContext: {} # cannot limit user privileges due to bootstrap needs (see comment at row 2) + # allowPrivilegeEscalation: false + # runAsNonRoot: true + # runAsUser: 65534 + # runAsGroup: 65534 + + init: + # Configure resources + # Warning: fab command consumes a lot of ram and can + # cause the process to be killed due to OOM if it exceeds limit + # Make sure you are giving a strong password for the admin user creation( else make sure you are changing after setup) + # Also change the admin email to your own custom email. + resources: {} + # limits: + # cpu: + # memory: + # requests: + # cpu: + # memory: + # -- Command + # @default -- a `superset_init.sh` command + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; . 
{{ .Values.configMountPath }}/superset_init.sh" + enabled: true + loadExamples: false + createAdmin: true + adminUser: + firstname: Superset + lastname: Admin + # -- List of initContainers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s + # -- A Superset init script + # @default -- a script to create admin user and initialize roles + initscript: |- + #!/bin/sh + set -eu + echo "Upgrading DB schema..." + superset db upgrade + echo "Initializing roles..." + superset init + {{ if .Values.init.createAdmin }} + echo "Creating admin user..." + superset fab create-admin \ + --username $SUPERSET_ADMIN_USER \ + --firstname {{ .Values.init.adminUser.firstname }} \ + --lastname {{ .Values.init.adminUser.lastname }} \ + --email $SUPERSET_ADMIN_EMAIL \ + --password $SUPERSET_ADMIN_PSW \ + || true + {{- end }} + {{ if .Values.init.loadExamples }} + echo "Loading examples..." + superset load_examples + {{- end }} + if [ -f "{{ .Values.extraConfigMountPath }}/import_datasources.yaml" ]; then + echo "Importing database connections.... 
" + superset import_datasources -p {{ .Values.extraConfigMountPath }}/import_datasources.yaml + fi + # -- Launch additional containers into init job pod + extraContainers: [] + ## Annotations to be added to init job pods + podAnnotations: {} + # Labels to be added to init job pods + podLabels: {} + podSecurityContext: + seccompProfile: + type: RuntimeDefault + containerSecurityContext: {} # cannot limit user privileges due to bootstrap needs (see comment at row 2) + # allowPrivilegeEscalation: false + # runAsNonRoot: true + # runAsUser: 65534 + # runAsGroup: 65534 + ## Tolerations to be added to init job pods + tolerations: [] + ## Affinity to be added to init job pods + affinity: {} + # -- TopologySpreadConstrains to be added to init job + topologySpreadConstraints: [] + # -- Set priorityClassName for init job pods + priorityClassName: ~ + + postgresql: + ## Set to false if bringing your own PostgreSQL. + enabled: false + + redis: + ## Set to false if bringing your own redis. + enabled: false + + nodeSelector: {} + + tolerations: + - key: dedicated + operator: Equal + value: p4pastats + effect: NoSchedule + + affinity: {} + + # -- TopologySpreadConstrains to be added to all deployments + topologySpreadConstraints: [] + + # -- Set priorityClassName for superset pods + priorityClassName: ~ \ No newline at end of file diff --git a/helm/dev/ext/placeholder b/helm/dev/ext/placeholder new file mode 100644 index 0000000..e69de29 diff --git a/helm/dev/force-dev b/helm/dev/force-dev new file mode 100644 index 0000000..415196e --- /dev/null +++ b/helm/dev/force-dev @@ -0,0 +1 @@ +118 diff --git a/helm/dev/mid/placeholder b/helm/dev/mid/placeholder new file mode 100644 index 0000000..e69de29 diff --git a/helm/dev/top/p4pa-superset/Chart.yaml b/helm/dev/top/p4pa-superset/Chart.yaml new file mode 100644 index 0000000..e1fa1f0 --- /dev/null +++ b/helm/dev/top/p4pa-superset/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: p4pa-superset +description: P4PA Superset +type: 
application +version: 1.0.0 +appVersion: 1.0.0 +dependencies: + - name: superset + version: 0.15.0 + repository: "https://apache.github.io/superset/" diff --git a/helm/dev/top/p4pa-superset/deploy.sh b/helm/dev/top/p4pa-superset/deploy.sh new file mode 100644 index 0000000..bede653 --- /dev/null +++ b/helm/dev/top/p4pa-superset/deploy.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +../../../scripts/deploy.sh values.yaml analytics p4pa-d-itn-dev-aks p4pa-superset diff --git a/helm/dev/top/p4pa-superset/force-deploy b/helm/dev/top/p4pa-superset/force-deploy new file mode 100644 index 0000000..6350475 --- /dev/null +++ b/helm/dev/top/p4pa-superset/force-deploy @@ -0,0 +1 @@ +0001 diff --git a/helm/dev/top/p4pa-superset/values.yaml b/helm/dev/top/p4pa-superset/values.yaml new file mode 100644 index 0000000..d924746 --- /dev/null +++ b/helm/dev/top/p4pa-superset/values.yaml @@ -0,0 +1,120 @@ +superset: + ingress: + hosts: + - "hub.internal.dev.p4pa.pagopa.it" + tls: [] + + resources: + # The limits below will apply to all Superset components. To set individual resource limitations refer to the pod specific values below. + # The pod specific values will overwrite anything that is set here. 
+ limits: + cpu: 500m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + + # Superset node configuration + supersetNode: + replicas: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetNode pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the supersetNode pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # Superset Celery worker configuration + supersetWorker: + replicas: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetWorker pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the supersetWorker pods - these settings overwrite might existing values from the global resources object defined above. + resources: + limits: + cpu: 500m + memory: 2048Mi + requests: + cpu: 100m + memory: 1024Mi + + # Superset beat configuration (to trigger scheduled jobs like reports) + supersetCeleryBeat: + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetCeleryBeat pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + supersetCeleryFlower: + replicaCount: 1 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetCeleryFlower pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + + supersetWebsockets: + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetWebsockets pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + resources: {} + diff --git a/helm/prod/ext/placeholder b/helm/prod/ext/placeholder new file mode 100644 index 0000000..e69de29 diff --git a/helm/prod/force-prod b/helm/prod/force-prod new file mode 100644 index 0000000..6cb3869 --- /dev/null +++ b/helm/prod/force-prod @@ -0,0 +1 @@ +1009 diff --git a/helm/prod/mid/placeholder b/helm/prod/mid/placeholder new file mode 100644 index 0000000..e69de29 diff --git 
a/helm/prod/top/p4pa-superset/Chart.yaml b/helm/prod/top/p4pa-superset/Chart.yaml new file mode 100644 index 0000000..e1fa1f0 --- /dev/null +++ b/helm/prod/top/p4pa-superset/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: p4pa-superset +description: P4PA Superset +type: application +version: 1.0.0 +appVersion: 1.0.0 +dependencies: + - name: superset + version: 0.15.0 + repository: "https://apache.github.io/superset/" diff --git a/helm/prod/top/p4pa-superset/deploy.sh b/helm/prod/top/p4pa-superset/deploy.sh new file mode 100644 index 0000000..bede653 --- /dev/null +++ b/helm/prod/top/p4pa-superset/deploy.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +../../../scripts/deploy.sh values.yaml analytics p4pa-p-itn-prod-aks p4pa-superset diff --git a/helm/prod/top/p4pa-superset/force-deploy b/helm/prod/top/p4pa-superset/force-deploy new file mode 100644 index 0000000..6350475 --- /dev/null +++ b/helm/prod/top/p4pa-superset/force-deploy @@ -0,0 +1 @@ +0001 diff --git a/helm/prod/top/p4pa-superset/values.yaml b/helm/prod/top/p4pa-superset/values.yaml new file mode 100644 index 0000000..a8af5a5 --- /dev/null +++ b/helm/prod/top/p4pa-superset/values.yaml @@ -0,0 +1,120 @@ +superset: + ingress: + hosts: + - "hub.internal.p4pa.pagopa.it" + tls: [] + + resources: + # The limits below will apply to all Superset components. To set individual resource limitations refer to the pod specific values below. + # The pod specific values will overwrite anything that is set here. 
+ limits: + cpu: 500m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + + # Superset node configuration + supersetNode: + replicas: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetNode pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the supersetNode pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # Superset Celery worker configuration + supersetWorker: + replicas: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetWorker pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the supersetWorker pods - these settings overwrite might existing values from the global resources object defined above. + resources: + limits: + cpu: 500m + memory: 2048Mi + requests: + cpu: 100m + memory: 1024Mi + + # Superset beat configuration (to trigger scheduled jobs like reports) + supersetCeleryBeat: + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetCeleryBeat pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + supersetCeleryFlower: + replicaCount: 1 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetCeleryFlower pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + + supersetWebsockets: + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetWebsockets pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + resources: {} + diff --git a/helm/uat/ext/placeholder b/helm/uat/ext/placeholder new file mode 100644 index 0000000..e69de29 diff --git a/helm/uat/force-uat b/helm/uat/force-uat new file mode 100644 index 0000000..6cb3869 --- /dev/null +++ b/helm/uat/force-uat @@ -0,0 +1 @@ +1009 diff --git a/helm/uat/mid/placeholder b/helm/uat/mid/placeholder new file mode 100644 index 0000000..e69de29 diff --git 
a/helm/uat/top/p4pa-superset/Chart.yaml b/helm/uat/top/p4pa-superset/Chart.yaml new file mode 100644 index 0000000..e1fa1f0 --- /dev/null +++ b/helm/uat/top/p4pa-superset/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: p4pa-superset +description: P4PA Superset +type: application +version: 1.0.0 +appVersion: 1.0.0 +dependencies: + - name: superset + version: 0.15.0 + repository: "https://apache.github.io/superset/" diff --git a/helm/uat/top/p4pa-superset/deploy.sh b/helm/uat/top/p4pa-superset/deploy.sh new file mode 100644 index 0000000..bede653 --- /dev/null +++ b/helm/uat/top/p4pa-superset/deploy.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +../../../scripts/deploy.sh values.yaml analytics p4pa-u-itn-uat-aks p4pa-superset diff --git a/helm/uat/top/p4pa-superset/force-deploy b/helm/uat/top/p4pa-superset/force-deploy new file mode 100644 index 0000000..6350475 --- /dev/null +++ b/helm/uat/top/p4pa-superset/force-deploy @@ -0,0 +1 @@ +0001 diff --git a/helm/uat/top/p4pa-superset/values.yaml b/helm/uat/top/p4pa-superset/values.yaml new file mode 100644 index 0000000..b423bf6 --- /dev/null +++ b/helm/uat/top/p4pa-superset/values.yaml @@ -0,0 +1,120 @@ +superset: + ingress: + hosts: + - "hub.internal.uat.p4pa.pagopa.it" + tls: [] + + resources: + # The limits below will apply to all Superset components. To set individual resource limitations refer to the pod specific values below. + # The pod specific values will overwrite anything that is set here. 
+ limits: + cpu: 500m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + + # Superset node configuration + supersetNode: + replicas: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetNode pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the supersetNode pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # Superset Celery worker configuration + supersetWorker: + replicas: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetWorker pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the supersetWorker pods - these settings overwrite might existing values from the global resources object defined above. + resources: + limits: + cpu: 500m + memory: 2048Mi + requests: + cpu: 100m + memory: 1024Mi + + # Superset beat configuration (to trigger scheduled jobs like reports) + supersetCeleryBeat: + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetCeleryBeat pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + supersetCeleryFlower: + replicaCount: 1 + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetCeleryFlower pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + + supersetWebsockets: + # -- Sets the [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for supersetWebsockets pods + podDisruptionBudget: + # -- Whether the pod disruption budget should be created + enabled: false + # -- If set, maxUnavailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + minAvailable: 1 + # -- If set, minAvailable must not be set - see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + maxUnavailable: 1 + resources: {} + diff --git a/scripts/deploy.sh b/scripts/deploy.sh new file mode 100644 index 0000000..1356443 --- /dev/null +++ b/scripts/deploy.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +set -e # Exit the script if any command fails + +### How to use +# +# deploy.sh +# deploy.sh values.yaml mil cstar-d-weu-dev01-aks status +# +# + +# Function to handle errors +handle_error() { + echo "❌ Error: $1" >&2 + exit 1 +} + +# Parameter 
verification +VALUES_FILE_NAME=$1 +NAMESPACE=$2 +CLUSTER_NAME=$3 # New parameter for cluster name +APP_NAME=$4 + +if [ -z "$VALUES_FILE_NAME" ] || [ -z "$NAMESPACE" ] || [ -z "$APP_NAME" ] || [ -z "$CLUSTER_NAME" ]; then + handle_error "All parameters are required: VALUES_FILE_NAME NAMESPACE CLUSTER_NAME APP_NAME" +fi + +# Check if kubectl is installed +if ! command -v kubectl &> /dev/null; then + handle_error "kubectl is not installed. Please install it and try again." +fi + +# Check if helm is installed +if ! command -v helm &> /dev/null; then + handle_error "Helm is not installed. Please install it and try again." +fi + +echo "🔄 Switching Kubernetes context to cluster $CLUSTER_NAME" +if ! kubectl config use-context "$CLUSTER_NAME"; then + handle_error "Unable to switch context to $CLUSTER_NAME. Make sure the cluster exists in your kubeconfig." +fi + +echo "🪚 Deleting charts folder" +rm -rf charts || handle_error "Unable to delete charts folder" + +echo "🔨 Starting Helm Template" +helm dep build && helm template . -f "$VALUES_FILE_NAME" --debug + + +echo "🚀 Launch helm deploy" +# Execute helm upgrade/install command and capture output and exit code +helm upgrade --namespace "$NAMESPACE" \ + --install --values "$VALUES_FILE_NAME" \ + --wait --timeout 3m0s "$APP_NAME" . + +exit_code=$? # NOTE(review): with 'set -e' above, a helm failure exits the script before reaching this line, so the branch below is dead code; consider 'if ! helm upgrade ...; then' to actually reach the handler + +# Check the command result +if [ $exit_code -ne 0 ]; then + handle_error "Failed to upgrade/install Helm chart" +else + echo "✅ Release installation completed successfully" +fi