diff --git a/.github/workflows/docker-image.yaml b/.github/workflows/docker-image.yaml index e27a3393..69505bb2 100644 --- a/.github/workflows/docker-image.yaml +++ b/.github/workflows/docker-image.yaml @@ -11,6 +11,7 @@ on: branches: - 'main' - 'release/*' + - 'k8s-collector' jobs: docker: diff --git a/Makefile b/Makefile index cb46b699..4566672c 100644 --- a/Makefile +++ b/Makefile @@ -30,3 +30,7 @@ build: fmt vet docker-build: DOCKER_BUILDKIT=1 docker build --pull --build-arg GO_VER=${GO_VER} --build-arg VERSION=${VERSION} --build-arg BUILD=${BUILD} --rm -f Dockerfile -t lagoon/build-deploy-image:local . docker run --entrypoint /bin/bash lagoon/build-deploy-image:local -c 'build-deploy-tool version' + +tag-and-push: + docker tag lagoon/build-deploy-image:local uselagoon/build-deploy-image:${VERSION} + docker push uselagoon/build-deploy-image:${VERSION} diff --git a/legacy/build-deploy-docker-compose.sh b/legacy/build-deploy-docker-compose.sh index 0880467a..51b27360 100755 --- a/legacy/build-deploy-docker-compose.sh +++ b/legacy/build-deploy-docker-compose.sh @@ -222,34 +222,33 @@ touch /tmp/warnings ############################################## buildStartTime="$(date +"%Y-%m-%d %H:%M:%S")" - -# @TODO: uncomment when collector is introduced -# beginBuildStep "Initial Environment Collection" "collectEnvironment" +beginBuildStep "Initial Environment Collection" "collectEnvironment" ############################################## ### COLLECT INFORMATION ############################################## + +echo "Collecting information about the environment" + # run the collector -# @TODO: uncomment when collector is introduced -# @TODO: don't run the collector yet, leave this as placeholder to prevent possible introduction of issues -# ENVIRONMENT_DATA=$(build-deploy-tool collect environment) -# echo "$ENVIRONMENT_DATA" | jq -r '.deployments.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.cronjobs.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r 
'.ingress.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.services.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.secrets.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.pvcs.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.schedulesv1.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.schedulesv1alpha1.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.prebackuppodsv1.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.prebackuppodsv1alpha1.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.mariadbconsumers.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.mongodbconsumers.items[]?.name' -# echo "$ENVIRONMENT_DATA" | jq -r '.postgresqlconsumers.items[]?.name' +ENVIRONMENT_DATA=$(build-deploy-tool collect environment) + +LAGOON_VARIABLES_ONLY=$(buildEnvVarCheck LAGOON_VARIABLES_ONLY "false") +if [ "${LAGOON_VARIABLES_ONLY}" == "true" ]; then + # do some checks to ensure safety and fail build if required + if kubectl -n ${NAMESPACE} get configmap lagoon-env &> /dev/null; then + # if the environment has a `lagoon-env` configmap present, fail this variable only deployment + # if the configmap is still present, there are other changes required that only a full deployment can achieve + echo "This environment currently doesn't support environment variable only deployments" + echo "You will need to run a full deployment to ensure the environment is up to date" + exit 1 + fi +fi currentStepEnd="$(date +"%Y-%m-%d %H:%M:%S")" -# @TODO: uncomment when collector is introduced -# finalizeBuildStep "${buildStartTime}" "${buildStartTime}" "${currentStepEnd}" "${NAMESPACE}" "collectEnvironment" "Initial Environment Collection" "false" +finalizeBuildStep "${buildStartTime}" "${buildStartTime}" "${currentStepEnd}" "${NAMESPACE}" "collectEnvironment" "Initial Environment Collection" "false" + +if [ "${LAGOON_VARIABLES_ONLY}" != "true" ]; then previousStepEnd=${currentStepEnd} beginBuildStep "Initial Environment Setup" "initialSetup" echo "STEP: 
Preparation started ${previousStepEnd}" @@ -292,7 +291,7 @@ if kubectl -n ${NAMESPACE} get configmap lagoon-yaml &> /dev/null; then # if the key does exist, then nuke it and put the new key kubectl -n ${NAMESPACE} create configmap lagoon-yaml --from-file=pre-deploy=.lagoon.yml -o yaml --dry-run=client | kubectl replace -f - fi - else +else # create it kubectl -n ${NAMESPACE} create configmap lagoon-yaml --from-file=pre-deploy=.lagoon.yml fi @@ -363,7 +362,7 @@ if kubectl -n ${NAMESPACE} get configmap docker-compose-yaml &> /dev/null; then # if the key does exist, then nuke it and put the new key kubectl -n ${NAMESPACE} create configmap docker-compose-yaml --from-file=pre-deploy="${DOCKER_COMPOSE_YAML}" -o yaml --dry-run=client | kubectl replace -f - fi - else +else # create it kubectl -n ${NAMESPACE} create configmap docker-compose-yaml --from-file=pre-deploy="${DOCKER_COMPOSE_YAML}" fi @@ -881,6 +880,7 @@ if [[ "$BUILD_TYPE" == "promote" ]]; then echo "No images built for promote environments" fi +if [ "${LAGOON_VARIABLES_ONLY}" != "true" ]; then ############################################## ### PUSH IMAGES TO REGISTRY ############################################## @@ -1010,6 +1010,7 @@ if [ "${DEPRECATED_IMAGE_WARNINGS}" == "true" ]; then finalizeBuildStep "${buildStartTime}" "${previousStepEnd}" "${currentStepEnd}" "${NAMESPACE}" "deprecatedImagesComplete" "Deprecated Image Warnings" "true" fi +fi # set that the image build and push phase has ended IMAGE_BUILD_PUSH_COMPLETE="true" @@ -1293,6 +1294,7 @@ fi # Get list of autogenerated routes AUTOGENERATED_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "lagoon.sh/autogenerated=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}') +if [ "${LAGOON_VARIABLES_ONLY}" != "true" ]; then # loop 
through created DBAAS templates DBAAS=($(build-deploy-tool identify dbaas)) for DBAAS_ENTRY in "${DBAAS[@]}" @@ -1313,7 +1315,7 @@ do unset IMAGES_PULL[$SERVICE_NAME] CONSUMER_TYPE="mariadbconsumer" . /kubectl-build-deploy/scripts/exec-kubectl-dbaas-wait.sh - MARIADB_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(kubectl -n ${NAMESPACE} get mariadbconsumer/${SERVICE_NAME} -o json | jq -r '.spec | @base64') + MARIADB_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(kubectl -n ${NAMESPACE} get mariadbconsumer/${SERVICE_NAME} -o json | jq -r '. | @base64') ;; postgres-dbaas) @@ -1321,7 +1323,7 @@ do unset IMAGES_PULL[$SERVICE_NAME] CONSUMER_TYPE="postgresqlconsumer" . /kubectl-build-deploy/scripts/exec-kubectl-dbaas-wait.sh - POSTGRES_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(kubectl -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o json | jq -r '.spec | @base64') + POSTGRES_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(kubectl -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o json | jq -r '. | @base64') ;; mongodb-dbaas) @@ -1329,7 +1331,7 @@ do unset IMAGES_PULL[$SERVICE_NAME] CONSUMER_TYPE="mongodbconsumer" . /kubectl-build-deploy/scripts/exec-kubectl-dbaas-wait.sh - MONGODB_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(kubectl -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o json | jq -r '.spec | @base64') + MONGODB_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(kubectl -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o json | jq -r '. | @base64') ;; *) @@ -1337,19 +1339,39 @@ do esac done +else + # variable only deployment + MARIADB_DBAAS_CONSUMERS=$(echo "$ENVIRONMENT_DATA" | jq -r '.mariadbconsumers.items[]? | @base64') + for MARIADB_DBAAS_CONSUMER in ${MARIADB_DBAAS_CONSUMERS}; do + SERVICE_NAME=$(echo ${MARIADB_DBAAS_CONSUMER} | jq -Rr '@base64d | fromjson | .metadata.name') + MARIADB_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(echo ${MARIADB_DBAAS_CONSUMER} | jq -Rr '@base64d | fromjson | . 
| @base64') + done + MONGODB_DBAAS_CONSUMERS=$(echo "$ENVIRONMENT_DATA" | jq -r '.mongodbconsumers.items[]? | @base64') + for MONGODB_DBAAS_CONSUMER in ${MONGODB_DBAAS_CONSUMERS}; do + SERVICE_NAME=$(echo ${MONGODB_DBAAS_CONSUMER} | jq -Rr '@base64d | fromjson | .metadata.name') + MONGODB_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(echo ${MONGODB_DBAAS_CONSUMER} | jq -Rr '@base64d | fromjson | . | @base64') + done + POSTGRES_DBAAS_CONSUMERS=$(echo "$ENVIRONMENT_DATA" | jq -r '.postgresqlconsumers.items[]? | @base64') + for POSTGRES_DBAAS_CONSUMER in ${POSTGRES_DBAAS_CONSUMERS}; do + SERVICE_NAME=$(echo ${POSTGRES_DBAAS_CONSUMER} | jq -Rr '@base64d | fromjson | .metadata.name') + POSTGRES_DBAAS_CONSUMER_SPECS["${SERVICE_NAME}"]=$(echo ${POSTGRES_DBAAS_CONSUMER} | jq -Rr '@base64d | fromjson | . | @base64') + done + # variable only deployment +fi # convert specs into credential dump for ingestion by build-deploy-tool DBAAS_VARIABLES="[]" for SERVICE_NAME in "${!MARIADB_DBAAS_CONSUMER_SPECS[@]}" do + SERVICE_NAME=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .metadata.name') SERVICE_NAME_UPPERCASE=$(echo "$SERVICE_NAME" | tr '[:lower:]' '[:upper:]' | tr '-' '_') - DB_HOST=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.services.primary') - DB_USER=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.username') - DB_PASSWORD=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.password') - DB_NAME=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.database') - DB_PORT=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .provider.port') + DB_HOST=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.services.primary') + DB_USER=$(echo 
${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.username') + DB_PASSWORD=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.password') + DB_NAME=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.database') + DB_PORT=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.provider.port') DB_CONSUMER='{"'${SERVICE_NAME_UPPERCASE}'_HOST":"'${DB_HOST}'", "'${SERVICE_NAME_UPPERCASE}'_USERNAME":"'${DB_USER}'","'${SERVICE_NAME_UPPERCASE}'_PASSWORD":"'${DB_PASSWORD}'","'${SERVICE_NAME_UPPERCASE}'_DATABASE":"'${DB_NAME}'","'${SERVICE_NAME_UPPERCASE}'_PORT":"'${DB_PORT}'"}' - if DB_READREPLICA_HOSTS=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.services.replicas | .[]' 2>/dev/null); then + if DB_READREPLICA_HOSTS=$(echo ${MARIADB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.services.replicas | .[]' 2>/dev/null); then if [ "$DB_READREPLICA_HOSTS" != "null" ]; then DB_READREPLICA_HOSTS=$(echo "$DB_READREPLICA_HOSTS" | sed 's/^\|$//g' | paste -sd, -) DB_CONSUMER=$(echo "${DB_CONSUMER}" | jq '. 
+ {"'${SERVICE_NAME_UPPERCASE}'_READREPLICA_HOSTS":"'${DB_READREPLICA_HOSTS}'"}') @@ -1360,14 +1382,15 @@ done for SERVICE_NAME in "${!POSTGRES_DBAAS_CONSUMER_SPECS[@]}" do + SERVICE_NAME=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .metadata.name') SERVICE_NAME_UPPERCASE=$(echo "$SERVICE_NAME" | tr '[:lower:]' '[:upper:]' | tr '-' '_') - DB_HOST=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.services.primary') - DB_USER=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.username') - DB_PASSWORD=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.password') - DB_NAME=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.database') - DB_PORT=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .provider.port') + DB_HOST=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.services.primary') + DB_USER=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.username') + DB_PASSWORD=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.password') + DB_NAME=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.database') + DB_PORT=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.provider.port') DB_CONSUMER='{"'${SERVICE_NAME_UPPERCASE}'_HOST":"'${DB_HOST}'", "'${SERVICE_NAME_UPPERCASE}'_USERNAME":"'${DB_USER}'","'${SERVICE_NAME_UPPERCASE}'_PASSWORD":"'${DB_PASSWORD}'","'${SERVICE_NAME_UPPERCASE}'_DATABASE":"'${DB_NAME}'","'${SERVICE_NAME_UPPERCASE}'_PORT":"'${DB_PORT}'"}' - if DB_READREPLICA_HOSTS=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr 
'@base64d | fromjson | .consumer.services.replicas | .[]' 2>/dev/null); then + if DB_READREPLICA_HOSTS=$(echo ${POSTGRES_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.services.replicas | .[]' 2>/dev/null); then if [ "$DB_READREPLICA_HOSTS" != "null" ]; then DB_READREPLICA_HOSTS=$(echo "$DB_READREPLICA_HOSTS" | sed 's/^\|$//g' | paste -sd, -) DB_CONSUMER=$(echo "${DB_CONSUMER}" | jq '. + {"'${SERVICE_NAME_UPPERCASE}'_READREPLICA_HOSTS":"'${DB_READREPLICA_HOSTS}'"}') @@ -1378,15 +1401,16 @@ done for SERVICE_NAME in "${!MONGODB_DBAAS_CONSUMER_SPECS[@]}" do + SERVICE_NAME=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .metadata.name') SERVICE_NAME_UPPERCASE=$(echo "$SERVICE_NAME" | tr '[:lower:]' '[:upper:]' | tr '-' '_') - DB_HOST=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.services.primary') - DB_USER=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.username') - DB_PASSWORD=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.password') - DB_NAME=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .consumer.database') - DB_PORT=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .provider.port') - DB_AUTHSOURCE=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .provider.auth.source') - DB_AUTHMECHANISM=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .provider.auth.mechanism') - DB_AUTHTLS=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .provider.auth.tls') + DB_HOST=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.services.primary') + DB_USER=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq 
-Rr '@base64d | fromjson | .spec.consumer.username') + DB_PASSWORD=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.password') + DB_NAME=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.consumer.database') + DB_PORT=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.provider.port') + DB_AUTHSOURCE=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.provider.auth.source') + DB_AUTHMECHANISM=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.provider.auth.mechanism') + DB_AUTHTLS=$(echo ${MONGODB_DBAAS_CONSUMER_SPECS["$SERVICE_NAME"]} | jq -Rr '@base64d | fromjson | .spec.provider.auth.tls') DB_CONSUMER='{"'${SERVICE_NAME_UPPERCASE}'_HOST":"'${DB_HOST}'", "'${SERVICE_NAME_UPPERCASE}'_USERNAME":"'${DB_USER}'", "'${SERVICE_NAME_UPPERCASE}'_PASSWORD":"'${DB_PASSWORD}'", "'${SERVICE_NAME_UPPERCASE}'_DATABASE":"'${DB_NAME}'", "'${SERVICE_NAME_UPPERCASE}'_PORT":"'${DB_PORT}'", "'${SERVICE_NAME_UPPERCASE}'_AUTHSOURCE":"'${DB_AUTHSOURCE}'", "'${SERVICE_NAME_UPPERCASE}'_AUTHMECHANISM":"'${DB_AUTHMECHANISM}'", "'${SERVICE_NAME_UPPERCASE}'_AUTHTLS":"'${DB_AUTHTLS}'"}' DBAAS_VARIABLES=$(echo "$DBAAS_VARIABLES" | jq '. 
+ '$(echo "$DB_CONSUMER" | jq -sMrc)'') done @@ -1762,7 +1786,7 @@ echo "Updating lagoon-yaml configmap with a post-deploy version of the .lagoon.y if kubectl -n ${NAMESPACE} get configmap lagoon-yaml &> /dev/null; then # replace it, no need to check if the key is different, as that will happen in the pre-deploy phase kubectl -n ${NAMESPACE} get configmap lagoon-yaml -o json | jq --arg add "`cat .lagoon.yml`" '.data."post-deploy" = $add' | kubectl apply -f - - else +else # create it kubectl -n ${NAMESPACE} create configmap lagoon-yaml --from-file=post-deploy=.lagoon.yml fi @@ -1770,7 +1794,7 @@ echo "Updating docker-compose-yaml configmap with a post-deploy version of the d if kubectl -n ${NAMESPACE} get configmap docker-compose-yaml &> /dev/null; then # replace it, no need to check if the key is different, as that will happen in the pre-deploy phase kubectl -n ${NAMESPACE} get configmap docker-compose-yaml -o json | jq --arg add "`cat ${DOCKER_COMPOSE_YAML}`" '.data."post-deploy" = $add' | kubectl apply -f - - else +else # create it kubectl -n ${NAMESPACE} create configmap docker-compose-yaml --from-file=post-deploy="${DOCKER_COMPOSE_YAML}" fi @@ -1820,6 +1844,60 @@ if [ "$(featureFlag INSIGHTS)" = enabled ]; then fi fi +else + # variable only deployment + beginBuildStep "Restarting Deployments" "restartingDeployments" + + ############################################## + ### APPLY RESOURCES for variable only deployments + ############################################## + + # remove any storage calculator pods before restarting deployments to prevent storage binding issues + STORAGE_CALCULATOR_PODS=$(kubectl -n ${NAMESPACE} get pods -l lagoon.sh/storageCalculator=true --no-headers 2>/dev/null | cut -d " " -f 1 | xargs) + for STORAGE_CALCULATOR_POD in $STORAGE_CALCULATOR_PODS; do + kubectl -n ${NAMESPACE} delete pod ${STORAGE_CALCULATOR_POD} + done + + # patch the deployments with the changed configmap to force a rollout + CHANGES_MADE=false + + DEPLOYMENTS=$(echo 
"$ENVIRONMENT_DATA" | jq -r '.deployments.items[]? | @base64') + for DEPLOYMENT in ${DEPLOYMENTS} + do + SERVICE_NAME=$(echo ${DEPLOYMENT} | jq -Rr '@base64d | fromjson | .metadata.name') + CURRENT_SHA=$(echo ${DEPLOYMENT} | jq -Rr '@base64d | fromjson | .spec.template.metadata.annotations."lagoon.sh/configMapSha"') + if [ "${CONFIG_MAP_SHA}" != "${CURRENT_SHA}" ]; then + CHANGES_MADE=true + kubectl -n ${NAMESPACE} patch deployment ${SERVICE_NAME} --type=merge --patch '{"spec":{"template":{"metadata":{"annotations":{"lagoon.sh/configMapSha":"'${CONFIG_MAP_SHA}'"}}}}}' + fi + done + + # wait for the deployments to restart + for DEPLOYMENT in ${DEPLOYMENTS} + do + SERVICE_NAME=$(echo ${DEPLOYMENT} | jq -Rr '@base64d | fromjson | .metadata.name') + CURRENT_SHA=$(echo ${DEPLOYMENT} | jq -Rr '@base64d | fromjson | .spec.template.metadata.annotations."lagoon.sh/configMapSha"') + if [ "${CONFIG_MAP_SHA}" != "${CURRENT_SHA}" ]; then + . /kubectl-build-deploy/scripts/exec-monitor-deploy.sh + fi + done + + if [ "$CHANGES_MADE" == "false" ]; then + echo "No variables changed, no services restarted" + fi + + if kubectl -n ${NAMESPACE} get configmap lagoon-env &> /dev/null; then + # now delete the configmap after all the lagoon-env and lagoon-platform-env calcs have been done + # and the deployments have rolled out successfully, this makes less problems rolling back if a build fails + # somewhere between the new secret being created, and the deployments rolling out + kubectl -n ${NAMESPACE} delete configmap lagoon-env + fi + + currentStepEnd="$(date +"%Y-%m-%d %H:%M:%S")" + patchBuildStep "${buildStartTime}" "${previousStepEnd}" "${currentStepEnd}" "${NAMESPACE}" "deploymentRestartComplete" "Restarting Deployments" "false" + previousStepEnd=${currentStepEnd} + # variable only deployment +fi EXTRA_WARNINGS=$(cat /tmp/warnings | wc -l) BUILD_WARNING_COUNT=$((BUILD_WARNING_COUNT + EXTRA_WARNINGS))