Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

docs: adding CI.md documentation #1802

Closed
wants to merge 47 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
3a678d6
init CI.md
nilgaar Oct 15, 2024
bdc5f19
+ Nightlies details
nilgaar Oct 16, 2024
e28373a
update CI
nilgaar Oct 17, 2024
f87be89
update CI.md and add comments to test setup script
nilgaar Oct 18, 2024
daeb937
+ adding subhash answers
nilgaar Oct 21, 2024
d7808bd
+ diagrams
nilgaar Oct 21, 2024
c384b64
restyle and format
nilgaar Oct 21, 2024
21cafb3
Merge branch 'main' into RHIDP-4244
nilgaar Oct 22, 2024
8df9b52
Merge branch 'main' into RHIDP-4244
nilgaar Oct 22, 2024
fd07c43
minor fix on missing enclosure
nilgaar Oct 22, 2024
52478ed
Merge remote-tracking branch 'origin/RHIDP-4244' into RHIDP-4244
nilgaar Oct 22, 2024
7f148bf
Merge branch 'main' into RHIDP-4244
nilgaar Oct 23, 2024
aa4b8f6
-Maintenance
nilgaar Oct 23, 2024
39a938e
cut duplicated info
nilgaar Oct 23, 2024
d9402fd
Merge branch 'main' into RHIDP-4244
nilgaar Oct 24, 2024
c144d99
observations about /ok-to-test
nilgaar Oct 24, 2024
c6482b2
Update e2e-tests/docs/CI.md
nilgaar Oct 24, 2024
6f8ac21
Merge remote-tracking branch 'origin/RHIDP-4244' into RHIDP-4244
nilgaar Oct 24, 2024
ed76698
Update e2e-tests/docs/CI.md
nilgaar Oct 24, 2024
9952ac5
Merge remote-tracking branch 'origin/RHIDP-4244' into RHIDP-4244
nilgaar Oct 24, 2024
dc85962
moving High-Level Overview of openshift-ci-tests.sh
nilgaar Oct 25, 2024
98ac7c6
note RBAC namespaces
nilgaar Oct 25, 2024
aa1a48f
info about config files
nilgaar Oct 25, 2024
87df647
+ test runner
nilgaar Oct 25, 2024
699d810
Merge branch 'main' into RHIDP-4244
nilgaar Oct 25, 2024
3bc310a
Merge remote-tracking branch 'origin/RHIDP-4244' into RHIDP-4244
nilgaar Oct 25, 2024
e0a56f2
fix github diagram colors
nilgaar Oct 25, 2024
4b78909
update nightly diagram
nilgaar Oct 25, 2024
17e5a72
update nightly backgrounds
nilgaar Oct 25, 2024
9ca7303
background color
nilgaar Oct 25, 2024
fa1d50c
+ diagram in mermaid code
nilgaar Oct 25, 2024
95ba54f
Merge remote-tracking branch 'upstream/main' into RHIDP-4244
nilgaar Nov 4, 2024
4413800
Merge branch 'main' into RHIDP-4244
nilgaar Nov 5, 2024
21769bd
Merge branch 'main' into RHIDP-4244
nilgaar Nov 6, 2024
ab31bc9
moving to /docs
nilgaar Nov 7, 2024
bdc9ba1
Merge branch 'main' into RHIDP-4244
nilgaar Nov 7, 2024
0317f60
fix background on nightly diagram
nilgaar Nov 8, 2024
9d8fefa
Merge branch 'main' into RHIDP-4244
nilgaar Nov 8, 2024
b351534
Merge branch 'main' into RHIDP-4244
nilgaar Nov 11, 2024
182ad43
Delete nightly_diagram.mermaid
nilgaar Nov 14, 2024
2037d3a
Merge branch 'main' into RHIDP-4244
nilgaar Nov 14, 2024
732343b
Merge branch 'main' into RHIDP-4244
nilgaar Nov 14, 2024
e8977d2
Merge branch 'main' into RHIDP-4244
nilgaar Nov 15, 2024
892873e
Merge branch 'main' into RHIDP-4244
nilgaar Nov 15, 2024
1698958
Merge branch 'main' into RHIDP-4244
nilgaar Nov 15, 2024
8cbe1d4
Merge branch 'main' into RHIDP-4244
nilgaar Nov 19, 2024
7431a6b
Merge branch 'main' into RHIDP-4244
josephca Nov 19, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
101 changes: 74 additions & 27 deletions .ibm/pipelines/openshift-ci-tests.sh
Original file line number Diff line number Diff line change
@@ -1,41 +1,48 @@
#!/bin/bash
# NOTE: shebang changed from /bin/sh — this script uses bash features
# (BASH_SOURCE, [[ ]] pattern matching, PIPESTATUS), so it must run under bash.

set -xe # Enable command tracing (-x) and exit on the first error (-e).
export PS4='[$(date "+%Y-%m-%d %H:%M:%S")] ' # Prepend a timestamp to every traced command.

# Define log file names and directories.
LOGFILE="test-log"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # Absolute directory of this script.
secret_name="rhdh-k8s-plugin-secret" # Secret holding the OCM cluster token (read later).
OVERALL_RESULT=0 # Aggregate exit status across all test runs; 0 = success.

# Define a cleanup function to be executed upon script exit.
# Clean up cluster resources and temporary files on script exit.
# Behavior depends on the CI job type (read from $JOB_NAME):
#   *aks*                   - stop the nightly AKS cluster
#   *pull-*-main-e2e-tests* - delete the namespaces created for PR e2e tests
# Always removes the temporary Helm binary directory.
cleanup() {
  echo "Cleaning up before exiting"
  if [[ "$JOB_NAME" == *aks* ]]; then
    # AKS nightly job: stop the AKS cluster.
    az_aks_stop "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
  elif [[ "$JOB_NAME" == *pull-*-main-e2e-tests* ]]; then
    # Main-branch PR e2e job: remove the namespaces used by the test run.
    delete_namespace "${NAME_SPACE}"
    delete_namespace "${NAME_SPACE_POSTGRES_DB}"
    delete_namespace "${NAME_SPACE_RBAC}"
  fi
  rm -rf ~/tmpbin # Remove the temporary binaries directory (created by install_helm).
}

trap cleanup EXIT
trap cleanup EXIT # Ensure the cleanup function runs on script exit.

source "${DIR}/utils.sh"
source "${DIR}/utils.sh" # Source utility functions from utils.sh.

# Function to set Kubernetes cluster information based on the job name.
set_cluster_info() {
export K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_URL)
export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_PR_OS_CLUSTER_TOKEN)

if [[ "$JOB_NAME" == *ocp-v4-14 ]]; then
# Use cluster credentials for OpenShift version 4.14.
K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_URL)
K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_1_CLUSTER_TOKEN)
elif [[ "$JOB_NAME" == *ocp-v4-13 ]]; then
# Use cluster credentials for OpenShift version 4.13.
K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_URL)
K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_OS_2_CLUSTER_TOKEN)
elif [[ "$JOB_NAME" == *aks* ]]; then
# Use cluster credentials for AKS.
K8S_CLUSTER_URL=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_URL)
K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/RHDH_AKS_CLUSTER_TOKEN)
fi
Expand Down Expand Up @@ -80,12 +87,14 @@ add_helm_repos() {
)

for repo in "${repos[@]}"; do
local key="${repo%%=*}"
local value="${repo##*=}"
local key="${repo%%=*}" # Extract repository name.
local value="${repo##*=}" # Extract repository URL.

if ! helm repo list | grep -q "^$key"; then
# If the repository is not already added, add it.
helm repo add "$key" "$value"
else
# If the repository exists, update it.
echo "Repository $key already exists - updating repository instead."
fi
done
Expand All @@ -97,6 +106,7 @@ install_oc() {
if command -v oc >/dev/null 2>&1; then
echo "oc is already installed."
else
# Download and install the 'oc' CLI.
curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz
tar -xf oc.tar.gz
mv oc /usr/local/bin/
Expand All @@ -111,6 +121,7 @@ install_helm() {
else
echo "Installing Helm 3 client"
mkdir ~/tmpbin && cd ~/tmpbin
# Install Helm using the official script.
curl -sL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash -f
export PATH=$(pwd):$PATH
echo "Helm client installed successfully."
Expand All @@ -126,13 +137,15 @@ uninstall_helmchart() {
fi
}

# Function to configure an OpenShift namespace.
configure_namespace() {
  # Recreate the given OpenShift namespace and make it the current context.
  # Arguments:
  #   $1 - namespace (project) name
  local project=$1
  delete_namespace "${project}" # Quoted: was unquoted, which would word-split unusual values.
  oc create namespace "${project}"
  oc config set-context --current --namespace="${project}"
}

# Function to delete an OpenShift namespace if it exists.
delete_namespace() {
local project=$1
if oc get namespace "$project" >/dev/null 2>&1; then
Expand All @@ -154,26 +167,32 @@ delete_namespace() {

configure_external_postgres_db() {
  # Deploy an external PostgreSQL instance and wire its TLS certificates and
  # credentials into the target namespace.
  # Arguments:
  #   $1 - namespace (project) that will consume the external DB
  # NOTE(review): the "postgress" spelling matches the resource names created by
  # resources/postgres-db/postgres.yaml — do not "fix" it in this function alone.
  local project=$1
  # Apply the PostgreSQL deployment YAML file.
  oc apply -f "${DIR}/resources/postgres-db/postgres.yaml" --namespace="${NAME_SPACE_POSTGRES_DB}"
  sleep 5 # Give the operator time to create the cert secret. TODO: poll instead of a fixed sleep.

  # Extract PostgreSQL certificates from the secret and save them to local files.
  # (Temp file renamed from the earlier "postgres-tsl-key" typo; written and read only here.)
  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.ca\.crt}' | base64 --decode > postgres-ca
  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.crt}' | base64 --decode > postgres-tls-crt
  oc get secret postgress-external-db-cluster-cert -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.tls\.key}' | base64 --decode > postgres-tls-key

  # Create a new secret in the project namespace with the PostgreSQL certificates.
  oc create secret generic postgress-external-db-cluster-cert \
    --from-file=ca.crt=postgres-ca \
    --from-file=tls.crt=postgres-tls-crt \
    --from-file=tls.key=postgres-tls-key \
    --dry-run=client -o yaml | oc apply -f - --namespace="${project}"

  # Retrieve the PostgreSQL password and host, and update the credentials YAML file.
  # JSONPath argument is quoted (was bare) to protect it from any shell interpretation.
  POSTGRES_PASSWORD=$(oc get secret/postgress-external-db-pguser-janus-idp -n "${NAME_SPACE_POSTGRES_DB}" -o jsonpath='{.data.password}')
  sed -i "s|POSTGRES_PASSWORD:.*|POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
  POSTGRES_HOST=$(echo -n "postgress-external-db-primary.$NAME_SPACE_POSTGRES_DB.svc.cluster.local" | base64 | tr -d '\n')
  sed -i "s|POSTGRES_HOST:.*|POSTGRES_HOST: ${POSTGRES_HOST}|g" "${DIR}/resources/postgres-db/postgres-cred.yaml"
  # Apply the updated credentials YAML file.
  oc apply -f "${DIR}/resources/postgres-db/postgres-cred.yaml" --namespace="${project}"
}

# Function to apply OpenShift YAML files to a namespace.
apply_yaml_files() {
local dir=$1
local project=$2
Expand All @@ -189,10 +208,12 @@ apply_yaml_files() {
"$dir/auth/secrets-rhdh-secrets.yaml"
)

# Update the namespace in each YAML file.
for file in "${files[@]}"; do
sed -i "s/namespace:.*/namespace: ${project}/g" "$file"
done

# Set GitHub App credentials based on the job name.
if [[ "$JOB_NAME" == *aks* ]]; then
GITHUB_APP_APP_ID=$GITHUB_APP_2_APP_ID
GITHUB_APP_CLIENT_ID=$GITHUB_APP_2_CLIENT_ID
Expand All @@ -206,32 +227,40 @@ apply_yaml_files() {
GITHUB_APP_CLIENT_SECRET=$(cat /tmp/secrets/GITHUB_APP_4_CLIENT_SECRET)
fi

# Replace placeholders in the secrets file with actual values.
for key in GITHUB_APP_APP_ID GITHUB_APP_CLIENT_ID GITHUB_APP_PRIVATE_KEY GITHUB_APP_CLIENT_SECRET GITHUB_APP_JANUS_TEST_APP_ID GITHUB_APP_JANUS_TEST_CLIENT_ID GITHUB_APP_JANUS_TEST_CLIENT_SECRET GITHUB_APP_JANUS_TEST_PRIVATE_KEY GITHUB_APP_WEBHOOK_URL GITHUB_APP_WEBHOOK_SECRET KEYCLOAK_CLIENT_SECRET ACR_SECRET GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET K8S_CLUSTER_TOKEN_ENCODED OCM_CLUSTER_URL GITLAB_TOKEN; do
sed -i "s|${key}:.*|${key}: ${!key}|g" "$dir/auth/secrets-rhdh-secrets.yaml"
done

# Apply OpenShift resources to the namespace.
oc apply -f "$dir/resources/service_account/service-account-rhdh.yaml" --namespace="${project}"
oc apply -f "$dir/auth/service-account-rhdh-secret.yaml" --namespace="${project}"
oc apply -f "$dir/auth/secrets-rhdh-secrets.yaml" --namespace="${project}"
if [[ "$JOB_NAME" != *aks* ]]; then
# Deploy a test Backstage customization provider for non-AKS jobs.
oc new-app https://github.com/janus-qe/test-backstage-customization-provider --namespace="${project}"
oc expose svc/test-backstage-customization-provider --namespace="${project}"
fi
# Apply ClusterRoles and ClusterRoleBindings.
oc apply -f "$dir/resources/cluster_role/cluster-role-k8s.yaml" --namespace="${project}"
oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-k8s.yaml" --namespace="${project}"
oc apply -f "$dir/resources/cluster_role/cluster-role-ocm.yaml" --namespace="${project}"
oc apply -f "$dir/resources/cluster_role_binding/cluster-role-binding-ocm.yaml" --namespace="${project}"

if [[ "$JOB_NAME" != *aks* ]]; then # Skip for AKS, because of strange `sed: -e expression #1, char 136: unterminated `s' command`
if [[ "$JOB_NAME" != *aks* ]]; then
# Update the API server URL in the secrets file for non-AKS jobs.
sed -i "s/K8S_CLUSTER_API_SERVER_URL:.*/K8S_CLUSTER_API_SERVER_URL: ${ENCODED_API_SERVER_URL}/g" "$dir/auth/secrets-rhdh-secrets.yaml"
fi
# Update the cluster name in the secrets file.
sed -i "s/K8S_CLUSTER_NAME:.*/K8S_CLUSTER_NAME: ${ENCODED_CLUSTER_NAME}/g" "$dir/auth/secrets-rhdh-secrets.yaml"

# Update the OCM cluster token in the secrets file.
set +x
token=$(oc get secret "${secret_name}" -n "${project}" -o=jsonpath='{.data.token}')
sed -i "s/OCM_CLUSTER_TOKEN: .*/OCM_CLUSTER_TOKEN: ${token}/" "$dir/auth/secrets-rhdh-secrets.yaml"
set -x

# Apply the appropriate ConfigMap based on the project name.
if [[ "${project}" == *rbac* ]]; then
oc apply -f "$dir/resources/config_map/configmap-app-config-rhdh-rbac.yaml" --namespace="${project}"
else
Expand All @@ -254,37 +283,42 @@ run_tests() {
yarn install
yarn playwright install

Xvfb :99 &
export DISPLAY=:99
Xvfb :99 & # Start a virtual framebuffer for GUI applications.
export DISPLAY=:99 # Set the display environment variable.

(
set -e
echo "Using PR container image: ${TAG_NAME}"
yarn "$project"
) 2>&1 | tee "/tmp/${LOGFILE}"
yarn "$project" # Run the tests for the specified Playwright project.
) 2>&1 | tee "/tmp/${LOGFILE}" # Log output to a file.

local RESULT=${PIPESTATUS[0]}
local RESULT=${PIPESTATUS[0]} # Capture the exit status of the tests.

pkill Xvfb
pkill Xvfb # Terminate the virtual framebuffer.

# Create directories for test results and attachments.
mkdir -p "${ARTIFACT_DIR}/${project}/test-results"
mkdir -p "${ARTIFACT_DIR}/${project}/attachments/screenshots"
# Copy test results to the artifact directory.
cp -a /tmp/backstage-showcase/e2e-tests/test-results/* "${ARTIFACT_DIR}/${project}/test-results"
cp -a /tmp/backstage-showcase/e2e-tests/${JUNIT_RESULTS} "${ARTIFACT_DIR}/${project}/${JUNIT_RESULTS}"

# Copy screenshots if they exist.
if [ -d "/tmp/backstage-showcase/e2e-tests/screenshots" ]; then
cp -a /tmp/backstage-showcase/e2e-tests/screenshots/* "${ARTIFACT_DIR}/${project}/attachments/screenshots/"
fi

# Convert ANSI logs to HTML.
ansi2html <"/tmp/${LOGFILE}" >"/tmp/${LOGFILE}.html"
cp -a "/tmp/${LOGFILE}.html" "${ARTIFACT_DIR}/${project}"
# Copy the Playwright report.
cp -a /tmp/backstage-showcase/e2e-tests/playwright-report/* "${ARTIFACT_DIR}/${project}"

droute_send "${release_name}" "${project}"
droute_send "${release_name}" "${project}" # Send test results through Data Router to ReportPortal.

echo "${project} RESULT: ${RESULT}"
if [ "${RESULT}" -ne 0 ]; then
OVERALL_RESULT=1
OVERALL_RESULT=1 # Set the overall result to failure if tests failed.
fi
}

Expand All @@ -310,7 +344,7 @@ check_backstage_running() {
export BASE_URL="${url}"
echo "######## BASE URL ########"
echo "${BASE_URL}"
return 0
return 0 # Success
else
echo "Attempt ${i} of ${max_attempts}: Backstage not yet available (HTTP Status: ${http_status})"
sleep "${wait_seconds}"
Expand All @@ -319,7 +353,7 @@ check_backstage_running() {

echo "Failed to reach Backstage at ${BASE_URL} after ${max_attempts} attempts." | tee -a "/tmp/${LOGFILE}"
cp -a "/tmp/${LOGFILE}" "${ARTIFACT_DIR}/${namespace}/"
return 1
return 1 # Failure
}

install_tekton_pipelines() {
Expand All @@ -333,6 +367,7 @@ install_tekton_pipelines() {
fi
}

# Function to initiate deployments on OpenShift clusters.
initiate_deployments() {

#install_pipelines_operator
Expand All @@ -343,14 +378,16 @@ initiate_deployments() {
configure_namespace "${NAME_SPACE}"
uninstall_helmchart "${NAME_SPACE}" "${RELEASE_NAME}"

# Deploy redis cache db.
# Deploy Redis cache database.
oc apply -f "$DIR/resources/redis-cache/redis-deployment.yaml" --namespace="${NAME_SPACE}"

cd "${DIR}"
apply_yaml_files "${DIR}" "${NAME_SPACE}"
echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE}"
# Install or upgrade the Helm chart.
helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"

# Configure namespaces for PostgreSQL DB and RBAC.
configure_namespace "${NAME_SPACE_POSTGRES_DB}"
configure_namespace "${NAME_SPACE_RBAC}"
configure_external_postgres_db "${NAME_SPACE_RBAC}"
Expand All @@ -361,6 +398,7 @@ initiate_deployments() {
helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_RBAC_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
}

# Function to initiate deployments on AKS clusters.
initiate_aks_deployment() {
add_helm_repos
install_helm
Expand All @@ -371,11 +409,14 @@ initiate_aks_deployment() {
uninstall_helmchart "${NAME_SPACE_AKS}" "${RELEASE_NAME}"
cd "${DIR}"
apply_yaml_files "${DIR}" "${NAME_SPACE_AKS}"
# Merge Helm value files specific to AKS.
yq_merge_value_files "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" "${DIR}/value_files/${HELM_CHART_AKS_DIFF_VALUE_FILE_NAME}" "/tmp/${HELM_CHART_AKS_MERGED_VALUE_FILE_NAME}"
echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE_AKS}"
# Install or upgrade the Helm chart on AKS.
helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_AKS}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "/tmp/${HELM_CHART_AKS_MERGED_VALUE_FILE_NAME}" --set global.host="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
}

# Function to initiate RBAC deployments on AKS clusters.
initiate_rbac_aks_deployment() {
add_helm_repos
install_helm
Expand Down Expand Up @@ -410,13 +451,13 @@ check_and_test() {
local namespace=$2
if check_backstage_running "${release_name}" "${namespace}"; then
echo "Display pods for verification..."
oc get pods -n "${namespace}"
run_tests "${release_name}" "${namespace}"
oc get pods -n "${namespace}" # List all pods in the namespace.
run_tests "${release_name}" "${namespace}" # Run the E2E tests.
else
echo "Backstage is not running. Exiting..."
OVERALL_RESULT=1
OVERALL_RESULT=1 # Set the overall result to failure.
fi
save_all_pod_logs $namespace
save_all_pod_logs $namespace # Save logs from all pods.
}

# Function to remove finalizers from specific resources in a namespace that are blocking deletion.
Expand Down Expand Up @@ -453,11 +494,13 @@ main() {

install_oc
if [[ "$JOB_NAME" == *aks* ]]; then
# Get AKS cluster credentials.
az aks get-credentials --name="${AKS_NIGHTLY_CLUSTER_NAME}" --resource-group="${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}" --overwrite-existing
else
# Log in to the OpenShift cluster.
oc login --token="${K8S_CLUSTER_TOKEN}" --server="${K8S_CLUSTER_URL}"
fi
echo "OCP version: $(oc version)"
echo "OCP version: $(oc version)" # Display the OpenShift version.

set_namespace

Expand All @@ -469,25 +512,29 @@ main() {

API_SERVER_URL=$(oc whoami --show-server)
if [[ "$JOB_NAME" == *aks* ]]; then
# Get the router base for AKS.
K8S_CLUSTER_ROUTER_BASE=$(kubectl get svc nginx --namespace app-routing-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
else
# Get the router base for OpenShift.
K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
fi

echo "K8S_CLUSTER_ROUTER_BASE : $K8S_CLUSTER_ROUTER_BASE"

ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64)
ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64)
ENCODED_API_SERVER_URL=$(echo "${API_SERVER_URL}" | base64) # Base64 encode the API server URL.
ENCODED_CLUSTER_NAME=$(echo "my-cluster" | base64) # Base64 encode the cluster name.


if [[ "$JOB_NAME" == *aks* ]]; then
# Initiate deployments on AKS.
initiate_aks_deployment
check_and_test "${RELEASE_NAME}" "${NAME_SPACE_AKS}"
delete_namespace "${NAME_SPACE_AKS}"
initiate_rbac_aks_deployment
check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_AKS}"
delete_namespace "${NAME_SPACE_RBAC_AKS}"
else
# Initiate deployments on OpenShift.
initiate_deployments
check_and_test "${RELEASE_NAME}" "${NAME_SPACE}"
check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}"
Expand All @@ -501,4 +548,4 @@ main() {
exit "${OVERALL_RESULT}"
}

main
main # Start the script execution by calling the main function.
Loading
Loading