From 4cf4e580c0529a30a26ea217eaeff1e638db6514 Mon Sep 17 00:00:00 2001 From: soloio-bot <> Date: Fri, 3 Jan 2025 17:23:38 +0000 Subject: [PATCH 01/34] Update from https://github.com/solo-io/procgen/commit/b0fcf769a8ed8b42653f7aeb190716fb7add3277 --- gloo-mesh/core/2-5/default/README.md | 42 +- .../deploy-kind-clusters/deploy-cluster1.sh | 289 +++ .../deploy-kind-clusters/deploy-cluster2.sh | 289 +++ .../steps/deploy-kind-clusters/deploy-mgmt.sh | 289 +++ gloo-mesh/core/2-5/default/package.json | 44 + gloo-mesh/core/2-5/default/run.sh | 1659 +++++++++++++++++ .../scripts/configure-domain-rewrite.sh | 2 +- .../2-5/default/scripts/register-domain.sh | 11 +- gloo-mesh/core/2-5/default/tests/chai-exec.js | 7 +- gloo-mesh/core/2-5/default/tests/chai-http.js | 27 +- .../tests/proxies-changes.test.js.liquid | 58 + 11 files changed, 2679 insertions(+), 38 deletions(-) create mode 100644 gloo-mesh/core/2-5/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh create mode 100644 gloo-mesh/core/2-5/default/data/steps/deploy-kind-clusters/deploy-cluster2.sh create mode 100644 gloo-mesh/core/2-5/default/data/steps/deploy-kind-clusters/deploy-mgmt.sh create mode 100644 gloo-mesh/core/2-5/default/package.json create mode 100644 gloo-mesh/core/2-5/default/run.sh create mode 100644 gloo-mesh/core/2-5/default/tests/proxies-changes.test.js.liquid diff --git a/gloo-mesh/core/2-5/default/README.md b/gloo-mesh/core/2-5/default/README.md index adf36f3a52..252b8370a8 100644 --- a/gloo-mesh/core/2-5/default/README.md +++ b/gloo-mesh/core/2-5/default/README.md @@ -15,7 +15,7 @@ source ./scripts/assert.sh ## Table of Contents * [Introduction](#introduction) -* [Lab 1 - Deploy KinD clusters](#lab-1---deploy-kind-clusters-) +* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-) * [Lab 2 - Deploy and register Gloo Mesh](#lab-2---deploy-and-register-gloo-mesh-) * [Lab 3 - Deploy Istio using Gloo Mesh Lifecycle Manager](#lab-3---deploy-istio-using-gloo-mesh-lifecycle-manager-) * [Lab 4 - Deploy the Bookinfo demo app](#lab-4---deploy-the-bookinfo-demo-app-) @@ -68,7 +68,7 @@ You can find more information about Gloo Mesh Core in the official documentation -## Lab 1 - Deploy KinD clusters +## Lab 1 - Deploy KinD Cluster(s) Clone this repository and go to the directory where this `README.md` file is. @@ -81,14 +81,13 @@ export CLUSTER1=cluster1 export CLUSTER2=cluster2 ``` -Run the following commands to deploy three Kubernetes clusters using [Kind](https://kind.sigs.k8s.io/): +Deploy the KinD clusters: ```bash -./scripts/deploy-aws.sh 1 mgmt -./scripts/deploy-aws.sh 2 cluster1 us-west us-west-1 -./scripts/deploy-aws.sh 3 cluster2 us-west us-west-2 +bash ./data/steps/deploy-kind-clusters/deploy-mgmt.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh ``` - Then run the following commands to wait for all the Pods to be ready: ```bash @@ -99,27 +98,8 @@ Then run the following commands to wait for all the Pods to be ready: **Note:** If you run the `check.sh` script immediately after the `deploy.sh` script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again. 
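If you want a quick readiness probe besides `check.sh`, the following is a minimal sketch (not part of the workshop scripts) that blocks until every pod in each cluster reports Ready; the context names are the ones exported above:

```bash
# Hypothetical convenience check: check.sh remains the canonical gate.
# Note: pods that have already Completed (e.g. finished jobs) never become
# Ready, so this can hang on clusters that run one-shot pods.
for ctx in mgmt cluster1 cluster2; do
  kubectl --context "${ctx}" wait pod --all --all-namespaces \
    --for=condition=Ready --timeout=300s
done
```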
-Once the `check.sh` script completes, when you execute the `kubectl get pods -A` command, you should see the following: - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system calico-kube-controllers-59d85c5c84-sbk4k 1/1 Running 0 4h26m -kube-system calico-node-przxs 1/1 Running 0 4h26m -kube-system coredns-6955765f44-ln8f5 1/1 Running 0 4h26m -kube-system coredns-6955765f44-s7xxx 1/1 Running 0 4h26m -kube-system etcd-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-apiserver-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-controller-manager-cluster1-control-plane1/1 Running 0 4h27m -kube-system kube-proxy-ksvzw 1/1 Running 0 4h26m -kube-system kube-scheduler-cluster1-control-plane 1/1 Running 0 4h27m -local-path-storage local-path-provisioner-58f6947c7-lfmdx 1/1 Running 0 4h26m -metallb-system controller-5c9894b5cd-cn9x2 1/1 Running 0 4h26m -metallb-system speaker-d7jkp 1/1 Running 0 4h26m -``` - -**Note:** The CNI pods might be different, depending on which CNI you have deployed. - -You can see that your currently connected to this cluster by executing the `kubectl config get-contexts` command: +Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state. + You can see that your currently connected to this cluster by executing the `kubectl config get-contexts` command: ``` CURRENT NAME CLUSTER AUTHINFO NAMESPACE @@ -138,7 +118,8 @@ cat <<'EOF' > ./test.js const helpers = require('./tests/chai-exec'); describe("Clusters are healthy", () => { - const clusters = [process.env.MGMT, process.env.CLUSTER1, process.env.CLUSTER2]; + const clusters = ["mgmt", "cluster1", "cluster2"]; + clusters.forEach(cluster => { it(`Cluster ${cluster} is healthy`, () => helpers.k8sObjectIsPresent({ context: cluster, namespace: "default", k8sType: "service", k8sObj: "kubernetes" })); }); @@ -150,6 +131,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || + ## Lab 2 - Deploy and register Gloo Mesh [VIDEO LINK](https://youtu.be/djfFiepK4GY "Video Link") @@ -190,6 +172,7 @@ EOF echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } --> + Run the following commands to deploy the Gloo Mesh management plane: ```bash @@ -490,6 +473,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || + ## Lab 3 - Deploy Istio using Gloo Mesh Lifecycle Manager [VIDEO LINK](https://youtu.be/f76-KOEjqHs "Video Link") diff --git a/gloo-mesh/core/2-5/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh b/gloo-mesh/core/2-5/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh new file mode 100644 index 0000000000..31b0806b9b --- /dev/null +++ b/gloo-mesh/core/2-5/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh @@ -0,0 +1,289 @@ +#!/usr/bin/env bash +set -o errexit + +number="2" +name="cluster1" +region="" +zone="" +twodigits=$(printf "%02d\n" $number) + +kindest_node=${KINDEST_NODE} + +if [ -z "$kindest_node" ]; then + export k8s_version="1.28.0" + + [[ ${k8s_version::1} != 'v' ]] && export k8s_version=v${k8s_version} + kindest_node_ver=$(curl --silent "https://registry.hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" \ + | jq -r '.results | .[] | select(.name==env.k8s_version) | .name+"@"+.digest') + + if [ -z 
"$kindest_node_ver" ]; then + echo "Incorrect Kubernetes version provided: ${k8s_version}." + exit 1 + fi + kindest_node=kindest/node:${kindest_node_ver} +fi +echo "Using KinD image: ${kindest_node}" + +if [ -z "$3" ]; then + case $name in + cluster1) + region=us-west-1 + ;; + cluster2) + region=us-west-2 + ;; + *) + region=us-east-1 + ;; + esac +fi + +if [ -z "$4" ]; then + case $name in + cluster1) + zone=us-west-1a + ;; + cluster2) + zone=us-west-2a + ;; + *) + zone=us-east-1a + ;; + esac +fi + +if hostname -I 2>/dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI 
+KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null || true +source ./scripts/assert.sh +export MGMT=mgmt +export CLUSTER1=cluster1 +export CLUSTER2=cluster2 +bash ./data/steps/deploy-kind-clusters/deploy-mgmt.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh +./scripts/check.sh mgmt +./scripts/check.sh cluster1 +./scripts/check.sh cluster2 +kubectl config use-context ${MGMT} +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Clusters are healthy", () => { + const clusters = ["mgmt", "cluster1", "cluster2"]; + + clusters.forEach(cluster => { + it(`Cluster ${cluster} is healthy`, () => helpers.k8sObjectIsPresent({ context: cluster, namespace: "default", k8sType: "service", k8sObj: "kubernetes" })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-kind-clusters/tests/cluster-healthy.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export GLOO_MESH_VERSION=v2.5.12 +curl -sL https://run.solo.io/meshctl/install | sh - +export PATH=$HOME/.gloo-mesh/bin:$PATH +cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; + +describe("Required environment variables should contain value", () => { + afterEach(function(done){ + 
if(this.currentTest.currentRetry() > 0){ + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } + }); + + it("Context environment variables should not be empty", () => { + expect(process.env.MGMT).not.to.be.empty + expect(process.env.CLUSTER1).not.to.be.empty + expect(process.env.CLUSTER2).not.to.be.empty + }); + + it("Gloo Mesh licence environment variables should not be empty", () => { + expect(process.env.GLOO_MESH_LICENSE_KEY).not.to.be.empty + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${MGMT} create ns gloo-mesh + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --set featureGates.insightsConfiguration=true \ + --version 2.5.12 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --version 2.5.12 \ + -f -< ./test.js + +const helpers = require('./tests/chai-exec'); + +describe("MGMT server is healthy", () => { + let cluster = process.env.MGMT; + let deployments = ["gloo-mesh-mgmt-server","gloo-mesh-redis","gloo-telemetry-gateway","prometheus-server"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "gloo-mesh", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/check-deployment.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/get-gloo-mesh-mgmt-server-ip.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export ENDPOINT_GLOO_MESH=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-mgmt-server -o jsonpath='{.status.loadBalancer.ingress[0].*}'):9900 +export HOST_GLOO_MESH=$(echo ${ENDPOINT_GLOO_MESH%:*}) +export ENDPOINT_TELEMETRY_GATEWAY=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-telemetry-gateway -o jsonpath='{.status.loadBalancer.ingress[0].*}'):4317 +export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090 +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GLOO_MESH + "' can be resolved in DNS", 
() => { + it(process.env.HOST_GLOO_MESH + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GLOO_MESH, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${MGMT} -f - < ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER1} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER1} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.5.12 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.5.12 \ + -f -< ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER2} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER2} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.5.12 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.5.12 \ + -f -< ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); +describe("Cluster registration", () => { + it("cluster1 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster1"); + }); + it("cluster2 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster2"); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/cluster-registration.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js 
--timeout 120000; exit 1; } +curl -L https://istio.io/downloadIstio | sh - + +if [ -d "istio-"*/ ]; then + cd istio-*/ + export PATH=$PWD/bin:$PATH + cd .. +fi +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/istio-lifecycle-manager-install/tests/istio-version.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns istio-gateways + +kubectl apply --context ${CLUSTER1} -f - < ./test.js + +const helpers = require('./tests/chai-exec'); + +const chaiExec = require("@jsdevtools/chai-exec"); +const helpersHttp = require('./tests/chai-http'); +const chai = require("chai"); +const expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("Checking Istio installation", function() { + it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER1, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER1 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER2, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER2 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/istio-lifecycle-manager-install/tests/istio-ready.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +timeout 2m bash -c "until [[ \$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o json | jq 
'.items[0].status.loadBalancer | length') -gt 0 ]]; do + sleep 1 +done" +export HOST_GW_CLUSTER1="$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +export HOST_GW_CLUSTER2="$(kubectl --context ${CLUSTER2} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER1 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER1 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER1, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER2 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER2 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER2, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns bookinfo-frontends +kubectl --context ${CLUSTER1} create ns bookinfo-backends +kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio.io/rev=1-20 --overwrite +kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio.io/rev=1-20 --overwrite + +# Deploy the frontend bookinfo service in the bookinfo-frontends namespace +kubectl --context ${CLUSTER1} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml + +# Deploy the backend bookinfo services in the bookinfo-backends namespace for all versions less than v3 +kubectl --context ${CLUSTER1} -n bookinfo-backends apply \ + -f data/steps/deploy-bookinfo/details-v1.yaml \ + -f data/steps/deploy-bookinfo/ratings-v1.yaml \ + -f data/steps/deploy-bookinfo/reviews-v1-v2.yaml + +# Update the reviews service to display where it is coming from +kubectl --context ${CLUSTER1} -n bookinfo-backends set env deploy/reviews-v1 CLUSTER_NAME=${CLUSTER1} +kubectl --context ${CLUSTER1} -n bookinfo-backends set env deploy/reviews-v2 CLUSTER_NAME=${CLUSTER1} +echo -n Waiting for bookinfo pods to be ready... 
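+# The readiness loop below sums .status.readyReplicas across the deployments in
+# each namespace and polls (for up to 5 minutes) until the frontend reports 1
+# ready replica and the four backend deployments report 4 between them.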
+timeout -v 5m bash -c " +until [[ \$(kubectl --context ${CLUSTER1} -n bookinfo-frontends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 1 && \\ + \$(kubectl --context ${CLUSTER1} -n bookinfo-backends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 4 ]] 2>/dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl --context ${CLUSTER2} create ns bookinfo-frontends +kubectl --context ${CLUSTER2} create ns bookinfo-backends +kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio.io/rev=1-20 --overwrite +kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio.io/rev=1-20 --overwrite + +# Deploy the frontend bookinfo service in the bookinfo-frontends namespace +kubectl --context ${CLUSTER2} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml +# Deploy the backend bookinfo services in the bookinfo-backends namespace for all versions +kubectl --context ${CLUSTER2} -n bookinfo-backends apply \ + -f data/steps/deploy-bookinfo/details-v1.yaml \ + -f data/steps/deploy-bookinfo/ratings-v1.yaml \ + -f data/steps/deploy-bookinfo/reviews-v1-v2.yaml \ + -f data/steps/deploy-bookinfo/reviews-v3.yaml +# Update the reviews service to display where it is coming from +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v1 CLUSTER_NAME=${CLUSTER2} +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v2 CLUSTER_NAME=${CLUSTER2} +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v3 CLUSTER_NAME=${CLUSTER2} + +echo -n Waiting for bookinfo pods to be ready... +timeout -v 5m bash -c " +until [[ \$(kubectl --context ${CLUSTER2} -n bookinfo-frontends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 1 && \\ + \$(kubectl --context ${CLUSTER2} -n bookinfo-backends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 5 ]] 2>/dev/null +do + sleep 1 + echo -n . 
+done" +echo +kubectl --context ${CLUSTER2} -n bookinfo-frontends get pods && kubectl --context ${CLUSTER2} -n bookinfo-backends get pods +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Bookinfo app", () => { + let cluster = process.env.CLUSTER1 + let deployments = ["productpage-v1"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-frontends", k8sObj: deploy })); + }); + deployments = ["ratings-v1", "details-v1", "reviews-v1", "reviews-v2"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-backends", k8sObj: deploy })); + }); + cluster = process.env.CLUSTER2 + deployments = ["productpage-v1"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-frontends", k8sObj: deploy })); + }); + deployments = ["ratings-v1", "details-v1", "reviews-v1", "reviews-v2", "reviews-v3"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-backends", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/deploy-bookinfo/tests/check-bookinfo.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns httpbin +kubectl apply --context ${CLUSTER1} -f - </dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl --context ${CLUSTER1} -n httpbin get pods +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("httpbin app", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "httpbin", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/httpbin/deploy-httpbin/tests/check-httpbin.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is available (HTTP)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `http://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/productpage-available.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout tls.key -out tls.crt -subj "/CN=*" +kubectl --context ${CLUSTER1} -n istio-gateways create secret generic tls-secret \ +--from-file=tls.key=tls.key \ +--from-file=tls.crt=tls.crt + +kubectl --context ${CLUSTER2} -n istio-gateways create secret generic tls-secret \ +--from-file=tls.key=tls.key \ +--from-file=tls.crt=tls.crt +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = 
require('./tests/chai-http'); + +describe("productpage is available (HTTPS)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/productpage-available-secure.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Otel metrics", () => { + it("cluster1 is sending metrics to telemetryGateway", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app.kubernetes.io/name=prometheus -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9090/api/v1/query?query=istio_requests_total" }).replaceAll("'", ""); + expect(command).to.contain("cluster\":\"cluster1"); + }); +}); + + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/otel-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=150 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-http'); +const puppeteer = require('puppeteer'); +const chai = require('chai'); +const expect = chai.expect; +const GraphPage = require('./tests/pages/gloo-ui/graph-page'); +const { recognizeTextFromScreenshot } = require('./tests/utils/image-ocr-processor'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("graph page", function () { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. 
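+  // Mocha scopes this.timeout() to this describe block, so only these UI
+  // tests get the larger budget, and Math.max never lowers a bigger timeout
+  // passed on the command line (e.g. --timeout 120000).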
+ const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let page; + let graphPage; + + beforeEach(async function () { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + page = await browser.newPage(); + graphPage = new GraphPage(page); + await Promise.all(Array.from({ length: 20 }, () => + helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 }))); + }); + + afterEach(async function () { + await browser.close(); + }); + + it("should show ingress gateway and product page", async function () { + await graphPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/graph`); + + // Select the clusters and namespaces so that the graph shows + await graphPage.selectClusters(['cluster1', 'cluster2']); + await graphPage.selectNamespaces(['istio-gateways', 'bookinfo-backends', 'bookinfo-frontends']); + // Disabling Cilium nodes due to this issue: https://github.com/solo-io/gloo-mesh-enterprise/issues/18623 + await graphPage.toggleLayoutSettings(); + await graphPage.disableCiliumNodes(); + await graphPage.toggleLayoutSettings(); + + // Capture a screenshot of the canvas and run text recognition + await graphPage.fullscreenGraph(); + await graphPage.centerGraph(); + const screenshotPath = 'ui-test-data/canvas.png'; + await graphPage.captureCanvasScreenshot(screenshotPath); + + const recognizedTexts = await recognizeTextFromScreenshot( + screenshotPath, + ["istio-ingressgateway", "productpage-v1", "details-v1", "ratings-v1", "reviews-v1", "reviews-v2"]); + + const flattenedRecognizedText = recognizedTexts.join(",").replace(/\n/g, ''); + console.log("Flattened recognized text:", flattenedRecognizedText); + + // Validate recognized texts + expect(flattenedRecognizedText).to.include("istio-ingressgateway"); + expect(flattenedRecognizedText).to.include("productpage-v1"); + expect(flattenedRecognizedText).to.include("details-v1"); + expect(flattenedRecognizedText).to.include("ratings-v1"); + expect(flattenedRecognizedText).to.include("reviews-v1"); + expect(flattenedRecognizedText).to.include("reviews-v2"); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/graph-shows-traffic.test.js.liquid" +timeout --signal=INT 7m mocha ./test.js --timeout 120000 --retries=3 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const helpersHttp = require('./tests/chai-http'); +const InsightsPage = require('./tests/pages/insights-page'); +const constants = require('./tests/pages/constants'); +const puppeteer = require('puppeteer'); +var chai = require('chai'); +var expect = chai.expect; +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Insights UI", function() { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. 
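+  // Called without an argument, this.timeout() reads the configured timeout
+  // instead of setting it; that is what lets the Math.max bump below work.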
+  const currentTimeout = this.timeout();
+  this.timeout(Math.max(currentTimeout, 30000));
+
+  let browser;
+  let insightsPage;
+
+  // Use Mocha's 'beforeEach' hook to set up Puppeteer
+  beforeEach(async function() {
+    browser = await puppeteer.launch({
+      headless: "new",
+      slowMo: 40,
+      ignoreHTTPSErrors: true,
+      args: ['--no-sandbox', '--disable-setuid-sandbox'],
+    });
+    browser = enhanceBrowser(browser, this.currentTest.title);
+    let page = await browser.newPage();
+    insightsPage = new InsightsPage(page);
+  });
+
+  // Use Mocha's 'afterEach' hook to close Puppeteer
+  afterEach(async function() {
+    await browser.close();
+  });
+
+  it("should display the BP0001 warning with text 'Globally scoped routing'", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    await insightsPage.selectClusters(['cluster1', 'cluster2']);
+    await insightsPage.selectInsightTypes([constants.InsightType.BP]);
+    const data = await insightsPage.getTableDataRows()
+    expect(data.some(item => item.includes("Globally scoped routing"))).to.be.true;
+  });
+
+  it("should have quick resource state filters", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    const healthy = await insightsPage.getHealthyResourcesCount();
+    const warning = await insightsPage.getWarningResourcesCount();
+    const error = await insightsPage.getErrorResourcesCount();
+    expect(healthy).to.be.greaterThan(0);
+    expect(warning).to.be.greaterThan(0);
+    expect(error).to.be.a('number');
+  });
+});
+
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-ui-BP0001.test.js.liquid"
+timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+cat <<'EOF' > ./test.js
+var chai = require('chai');
+var expect = chai.expect;
+const helpers = require('./tests/chai-exec');
+
+describe("Insight generation", () => {
+  it("Insight BP0002 has been triggered in the source (MGMT)", () => {
+    helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` });
+    helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx:1.25.3 --context " + process.env.MGMT });
+    command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", "");
+    const regex = /solo_io_insights{.*BP0002.*} 1/;
+    const match = command.match(regex);
+    expect(match).to.not.be.null;
+  });
+
+  it("Insight BP0002 has been triggered in PROMETHEUS", () => {
+    helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` });
+    command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=solo_io_insights'" }).replaceAll("'", "");
+    let result = JSON.parse(command);
+    let active = false;
+    result.data.result.forEach(item => {
+      if(item.metric.code == "BP0002" && item.value[1] > 0) {
+        active = true
+      }
+    });
+    expect(active).to.be.true;
+  });
+});
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-metrics.test.js.liquid"
+timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl apply --context ${MGMT} -f - < ./test.js
+const helpersHttp = require('./tests/chai-http');
+const InsightsPage = require('./tests/pages/insights-page');
+const constants = require('./tests/pages/constants');
+const puppeteer = require('puppeteer');
+const { enhanceBrowser } = require('./tests/utils/enhance-browser');
+var chai = require('chai');
+var expect = chai.expect;
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 4000);
+  } else {
+    done();
+  }
+});
+
+describe("Insights UI", function() {
+  // UI tests often require a longer timeout.
+  // So here we force it to a minimum of 30 seconds.
+  const currentTimeout = this.timeout();
+  this.timeout(Math.max(currentTimeout, 30000));
+
+  let browser;
+  let insightsPage;
+
+  // Use Mocha's 'beforeEach' hook to set up Puppeteer
+  beforeEach(async function() {
+    browser = await puppeteer.launch({
+      headless: "new",
+      slowMo: 40,
+      ignoreHTTPSErrors: true,
+      args: ['--no-sandbox', '--disable-setuid-sandbox'],
+    });
+    browser = enhanceBrowser(browser, this.currentTest.title);
+    let page = await browser.newPage();
+    await page.setViewport({ width: 1500, height: 1000 });
+    insightsPage = new InsightsPage(page);
+  });
+
+  // Use Mocha's 'afterEach' hook to close Puppeteer
+  afterEach(async function() {
+    await browser.close();
+  });
+
+  it("should not display BP0002 in the UI", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    await insightsPage.selectClusters(['cluster1', 'cluster2']);
+    await insightsPage.selectInsightTypes([constants.InsightType.BP]);
+    const data = await insightsPage.getTableDataRows()
+    expect(data.some(item => item.includes("is not namespaced"))).to.be.false;
+  });
+});
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-not-ui-BP0002.test.js.liquid"
+timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl apply --context ${CLUSTER1} -f - < ./test.js
+const helpersHttp = require('./tests/chai-http');
+const InsightsPage = require('./tests/pages/insights-page');
+const constants = require('./tests/pages/constants');
+const puppeteer = require('puppeteer');
+const { enhanceBrowser } = require('./tests/utils/enhance-browser');
+var chai = require('chai');
+var expect = chai.expect;
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 4000);
+  } else {
+    done();
+  }
+});
+
+describe("Insights UI", function() {
+  // UI tests often require a longer timeout.
+  // So here we force it to a minimum of 30 seconds.
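+  // The Puppeteer launch options in the beforeEach below are tuned for this
+  // containerized workshop environment: '--no-sandbox' and
+  // '--disable-setuid-sandbox' let Chromium start without kernel sandbox
+  // privileges, 'ignoreHTTPSErrors' accepts the lab's self-signed
+  // certificates, and 'slowMo: 40' throttles each browser action by 40ms so
+  // the UI has time to settle.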
+ const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let insightsPage; + + // Use Mocha's 'before' hook to set up Puppeteer + beforeEach(async function() { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + let page = await browser.newPage(); + await page.setViewport({ width: 1500, height: 1000 }); + insightsPage = new InsightsPage(page); + }); + + // Use Mocha's 'after' hook to close Puppeteer + afterEach(async function() { + await browser.close(); + }); + + it("should not display BP0001 in the UI", async () => { + await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`); + await insightsPage.selectClusters(['cluster1', 'cluster2']); + await insightsPage.selectInsightTypes([constants.InsightType.BP]); + const data = await insightsPage.getTableDataRows() + expect(data.some(item => item.includes("is not namespaced"))).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-not-ui-BP0001.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight CFG0001 has been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /solo_io_insights{.*CFG0001.*} 1/; + const match = command.match(regex); + expect(match).to.not.be.null; + }); + + it("Insight CFG0001 has been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=solo_io_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "CFG0001" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-config/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight 
CFG0001 has not been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /solo_io_insights{.*CFG0001.*} 1/; + const match = command.match(regex); + expect(match).to.be.null; + }); + + it("Insight CFG0001 has not been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=solo_io_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "CFG0001" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-config/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends delete virtualservice reviews +kubectl --context ${CLUSTER1} -n bookinfo-backends delete destinationrule reviews +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight SEC0008 has been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /solo_io_insights{.*SEC0008.*} 1/; + const match = command.match(regex); + expect(match).to.not.be.null; + }); + + it("Insight SEC0008 has been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=solo_io_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "SEC0008" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); +EOF +echo "executing 
test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-security/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight SEC0008 has not been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /solo_io_insights{.*SEC0008.*} 1/; + const match = command.match(regex); + expect(match).to.be.null; + }); + + it("Insight SEC0008 has not been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=solo_io_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "SEC0008" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-security/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends delete authorizationpolicy reviews +kubectl --context ${CLUSTER1} -n istio-system delete peerauthentication default diff --git a/gloo-mesh/core/2-5/default/scripts/configure-domain-rewrite.sh b/gloo-mesh/core/2-5/default/scripts/configure-domain-rewrite.sh index be6dbd6d8b..d6e684c9da 100755 --- a/gloo-mesh/core/2-5/default/scripts/configure-domain-rewrite.sh +++ b/gloo-mesh/core/2-5/default/scripts/configure-domain-rewrite.sh @@ -90,4 +90,4 @@ done # If the loop exits, it means the check failed consistently for 1 minute echo "DNS rewrite rule verification failed." 
-exit 1 +exit 1 \ No newline at end of file diff --git a/gloo-mesh/core/2-5/default/scripts/register-domain.sh b/gloo-mesh/core/2-5/default/scripts/register-domain.sh index f9084487e8..1cb84cd86a 100755 --- a/gloo-mesh/core/2-5/default/scripts/register-domain.sh +++ b/gloo-mesh/core/2-5/default/scripts/register-domain.sh @@ -14,7 +14,9 @@ hosts_file="/etc/hosts" # Function to check if the input is a valid IP address is_ip() { if [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - return 0 # 0 = true + return 0 # 0 = true - valid IPv4 address + elif [[ $1 =~ ^[0-9a-f]+[:]+[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9]*$ ]]; then + return 0 # 0 = true - valid IPv6 address else return 1 # 1 = false fi @@ -38,14 +40,15 @@ else fi # Check if the entry already exists -if grep -q "$hostname" "$hosts_file"; then +if grep -q "$hostname\$" "$hosts_file"; then # Update the existing entry with the new IP tempfile=$(mktemp) - sed "s/^.*$hostname/$new_ip $hostname/" "$hosts_file" > "$tempfile" + sed "s/^.*$hostname\$/$new_ip $hostname/" "$hosts_file" > "$tempfile" sudo cp "$tempfile" "$hosts_file" + rm "$tempfile" echo "Updated $hostname in $hosts_file with new IP: $new_ip" else # Add a new entry if it doesn't exist echo "$new_ip $hostname" | sudo tee -a "$hosts_file" > /dev/null echo "Added $hostname to $hosts_file with IP: $new_ip" -fi \ No newline at end of file +fi diff --git a/gloo-mesh/core/2-5/default/tests/chai-exec.js b/gloo-mesh/core/2-5/default/tests/chai-exec.js index 67ba62f095..020262437f 100644 --- a/gloo-mesh/core/2-5/default/tests/chai-exec.js +++ b/gloo-mesh/core/2-5/default/tests/chai-exec.js @@ -139,7 +139,11 @@ global = { }, k8sObjectIsPresent: ({ context, namespace, k8sType, k8sObj }) => { - let command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + // covers both namespace scoped and cluster scoped objects + let command = "kubectl --context " + context + " get " + k8sType + " " + k8sObj + " -o name"; + if (namespace) { + command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + } debugLog(`Executing command: ${command}`); let cli = chaiExec(command); @@ -176,7 +180,6 @@ global = { debugLog(`Command output (stdout): ${cli.stdout}`); return cli.stdout; }, - curlInPod: ({ curlCommand, podName, namespace }) => { debugLog(`Executing curl command: ${curlCommand} on pod: ${podName} in namespace: ${namespace}`); const cli = chaiExec(curlCommand); diff --git a/gloo-mesh/core/2-5/default/tests/chai-http.js b/gloo-mesh/core/2-5/default/tests/chai-http.js index 67f43db003..92bf579690 100644 --- a/gloo-mesh/core/2-5/default/tests/chai-http.js +++ b/gloo-mesh/core/2-5/default/tests/chai-http.js @@ -25,7 +25,30 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); + }); + }, + + checkURLWithIP: ({ ip, host, protocol = "http", path = "", headers = [], certFile = '', keyFile = '', retCode }) => { + debugLog(`Checking URL with IP: ${ip}, Host: ${host}, Path: ${path} with expected return code: ${retCode}`); + + let cert = certFile ? fs.readFileSync(certFile) : ''; + let key = keyFile ? 
fs.readFileSync(keyFile) : ''; + + let url = `${protocol}://${ip}`; + + // Use chai-http to make a request to the IP address, but set the Host header + let request = chai.request(url).head(path).redirects(0).cert(cert).key(key).set('Host', host); + + debugLog(`Setting headers: ${JSON.stringify(headers)}`); + headers.forEach(header => request.set(header.key, header.value)); + + return request + .send() + .then(async function (res) { + debugLog(`Response status code: ${res.status}`); + debugLog(`Response ${JSON.stringify(res)}`); + expect(res).to.have.property('status', retCode); }); }, @@ -124,7 +147,7 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); }); } }; diff --git a/gloo-mesh/core/2-5/default/tests/proxies-changes.test.js.liquid b/gloo-mesh/core/2-5/default/tests/proxies-changes.test.js.liquid new file mode 100644 index 0000000000..1934ea13b6 --- /dev/null +++ b/gloo-mesh/core/2-5/default/tests/proxies-changes.test.js.liquid @@ -0,0 +1,58 @@ +{%- assign version_1_18_or_after = "1.18.0" | minimumGlooGatewayVersion %} +const { execSync } = require('child_process'); +const { expect } = require('chai'); +const { diff } = require('jest-diff'); + +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +describe('Gloo snapshot stability test', function() { + let contextName = process.env.{{ context | default: "CLUSTER1" }}; + let delaySeconds = {{ delay | default: 5 }}; + + let firstSnapshot; + + it('should retrieve initial snapshot', function() { + const output = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + + try { + firstSnapshot = JSON.parse(output); + } catch (err) { + throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message); + } + expect(firstSnapshot).to.be.an('object'); + }); + + it('should not change after the given delay', async function() { + await delay(delaySeconds * 1000); + + let secondSnapshot; + try { + const output2 = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + secondSnapshot = JSON.parse(output2); + } catch (err) { + throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message); + } + + const firstJson = JSON.stringify(firstSnapshot, null, 2); + const secondJson = JSON.stringify(secondSnapshot, null, 2); + + // Show only 2 lines of context around each change + const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false }); + + if (! diffOutput.includes("Compared values have no visual difference.")) { + console.error('Differences found between snapshots:\n' + diffOutput); + throw new Error('Snapshots differ after the delay.'); + } else { + console.log('No differences found. 
The snapshots are stable.'); + } + }); +}); + From 225f453903270fac120bf822c8bf9e1b066b691a Mon Sep 17 00:00:00 2001 From: soloio-bot <> Date: Fri, 3 Jan 2025 17:23:38 +0000 Subject: [PATCH 02/34] Update from https://github.com/solo-io/procgen/commit/b0fcf769a8ed8b42653f7aeb190716fb7add3277 --- .../core/2-6/ambient-multi-cluster/README.md | 204 +-- .../deploy-kind-clusters/deploy-cluster1.sh | 292 +++ .../deploy-kind-clusters/deploy-cluster2.sh | 292 +++ .../2-6/ambient-multi-cluster/package.json | 44 + .../core/2-6/ambient-multi-cluster/run.sh | 1572 +++++++++++++++++ .../scripts/configure-domain-rewrite.sh | 2 +- .../scripts/register-domain.sh | 11 +- .../ambient-multi-cluster/tests/chai-exec.js | 7 +- .../ambient-multi-cluster/tests/chai-http.js | 27 +- .../tests/proxies-changes.test.js.liquid | 58 + 10 files changed, 2391 insertions(+), 118 deletions(-) create mode 100644 gloo-mesh/core/2-6/ambient-multi-cluster/data/steps/deploy-kind-clusters/deploy-cluster1.sh create mode 100644 gloo-mesh/core/2-6/ambient-multi-cluster/data/steps/deploy-kind-clusters/deploy-cluster2.sh create mode 100644 gloo-mesh/core/2-6/ambient-multi-cluster/package.json create mode 100644 gloo-mesh/core/2-6/ambient-multi-cluster/run.sh create mode 100644 gloo-mesh/core/2-6/ambient-multi-cluster/tests/proxies-changes.test.js.liquid diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/README.md b/gloo-mesh/core/2-6/ambient-multi-cluster/README.md index f0c3b09e2b..84e2964a37 100644 --- a/gloo-mesh/core/2-6/ambient-multi-cluster/README.md +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/README.md @@ -15,7 +15,7 @@ source ./scripts/assert.sh ## Table of Contents * [Introduction](#introduction) -* [Lab 1 - Deploy KinD clusters](#lab-1---deploy-kind-clusters-) +* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-) * [Lab 2 - Deploy and register Gloo Mesh](#lab-2---deploy-and-register-gloo-mesh-) * [Lab 3 - Configure common trust certificates in both clusters](#lab-3---configure-common-trust-certificates-in-both-clusters-) * [Lab 4 - Deploy Istio using Helm](#lab-4---deploy-istio-using-helm-) @@ -69,7 +69,7 @@ You can find more information about Gloo Mesh Core in the official documentation -## Lab 1 - Deploy KinD clusters +## Lab 1 - Deploy KinD Cluster(s) Clone this repository and go to the directory where this `README.md` file is. @@ -82,13 +82,12 @@ export CLUSTER1=cluster1 export CLUSTER2=cluster2 ``` -Run the following commands to deploy two Kubernetes clusters using [Kind](https://kind.sigs.k8s.io/): +Deploy the KinD clusters: ```bash -./scripts/deploy-aws-with-calico.sh 1 cluster1 us-west us-west-1 -./scripts/deploy-aws-with-calico.sh 2 cluster2 us-west us-west-2 +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh ``` - Then run the following commands to wait for all the Pods to be ready: ```bash @@ -98,45 +97,14 @@ Then run the following commands to wait for all the Pods to be ready: **Note:** If you run the `check.sh` script immediately after the `deploy.sh` script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again. 
-Once the `check.sh` script completes, when you execute the `kubectl get pods -A` command, you should see the following: - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system calico-kube-controllers-59d85c5c84-sbk4k 1/1 Running 0 4h26m -kube-system calico-node-przxs 1/1 Running 0 4h26m -kube-system coredns-6955765f44-ln8f5 1/1 Running 0 4h26m -kube-system coredns-6955765f44-s7xxx 1/1 Running 0 4h26m -kube-system etcd-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-apiserver-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-controller-manager-cluster1-control-plane1/1 Running 0 4h27m -kube-system kube-proxy-ksvzw 1/1 Running 0 4h26m -kube-system kube-scheduler-cluster1-control-plane 1/1 Running 0 4h27m -local-path-storage local-path-provisioner-58f6947c7-lfmdx 1/1 Running 0 4h26m -metallb-system controller-5c9894b5cd-cn9x2 1/1 Running 0 4h26m -metallb-system speaker-d7jkp 1/1 Running 0 4h26m -``` - -**Note:** The CNI pods might be different, depending on which CNI you have deployed. - -You can see that your currently connected to this cluster by executing the `kubectl config get-contexts` command: - -``` -CURRENT NAME CLUSTER AUTHINFO NAMESPACE - cluster1 kind-cluster1 cluster1 -* cluster2 kind-cluster2 cluster2 -``` - -Run the following command to make `cluster1` the current cluster. - -```bash -kubectl config use-context ${MGMT} -``` +Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state. + Run the following commands to deploy the Gloo Mesh management plane: ```bash @@ -443,6 +413,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || + ## Lab 3 - Configure common trust certificates in both clusters Create intermediate CAs in both clusters and the Root CA. @@ -551,7 +522,7 @@ describe("istio_version is at least 1.23.0", () => { it("version should be at least 1.23.0", () => { // Compare the string istio_version to the number 1.23.0 // example 1.23.0-patch0 is valid, but 1.22.6 is not - let version = "1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864"; + let version = "1.23.1"; let versionParts = version.split('-')[0].split('.'); let major = parseInt(versionParts[0]); let minor = parseInt(versionParts[1]); @@ -745,42 +716,30 @@ EOF ``` Let's deploy Istio using Helm in cluster1. We'll install the base Istio components, the Istiod control plane, the Istio CNI, the ztunnel, and the ingress/eastwest gateways. 
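+The charts build on one another, so the order below matters: `base` installs the CRDs, `istiod` the control plane, `cni` and `ztunnel` the ambient data plane, and `gateway` the ingress and east-west entry points. After the installs complete, you can sanity-check the releases with something like the following (a sketch, assuming the namespaces used in this lab and the `ztunnel` DaemonSet name):
+
+```bash
+helm ls --kube-context=${CLUSTER1} -n istio-system
+helm ls --kube-context=${CLUSTER1} -n istio-gateways
+kubectl --context ${CLUSTER1} -n istio-system rollout status ds/ztunnel
+```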
-For private registries, let's first load the images into kind: -```bash -KIND_NAME=$(kubectl config get-contexts ${CLUSTER1} | grep ${CLUSTER1} | awk '{printf $3}' | cut -d'-' -f2) - -for image in pilot install-cni ztunnel proxyv2; do - docker pull "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864" - docker pull "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864-distroless" - kind load docker-image --name "$KIND_NAME" "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864" - kind load docker-image --name "$KIND_NAME" "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864-distroless" -done -``` - ```bash -helm upgrade --install istio-base oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/base \ +helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/base \ --namespace istio-system \ --kube-context=${CLUSTER1} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - </istiod \ --namespace istio-system \ --kube-context=${CLUSTER1} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < proxy: clusterDomain: cluster.local - tag: 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 + tag: 1.23.1-solo multiCluster: clusterName: cluster1 profile: ambient @@ -806,15 +765,15 @@ pilot: enabled: true EOF -helm upgrade --install istio-cni oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/cni \ +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ --namespace kube-system \ --kube-context=${CLUSTER1} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < + proxy: 1.23.1-solo profile: ambient cni: ambient: @@ -824,10 +783,10 @@ cni: - kube-system EOF -helm upgrade --install ztunnel oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/ztunnel \ +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ --namespace istio-system \ --kube-context=${CLUSTER1} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < istioNamespace: istio-system multiCluster: clusterName: cluster1 @@ -843,15 +802,15 @@ namespace: istio-system profile: ambient proxy: clusterDomain: cluster.local -tag: 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 +tag: 1.23.1-solo terminationGracePeriodSeconds: 29 variant: distroless EOF -helm upgrade --install istio-ingressgateway- oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/gateway \ +helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER1} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - </gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER1} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < /dev ``` Let's deploy Istio using Helm in cluster2. 
We'll install the base Istio components, the Istiod control plane, the Istio CNI, the ztunnel, and the ingress/eastwest gateways. -For private registries, let's first load the images into kind: -```bash -KIND_NAME=$(kubectl config get-contexts ${CLUSTER2} | grep ${CLUSTER2} | awk '{printf $3}' | cut -d'-' -f2) - -for image in pilot install-cni ztunnel proxyv2; do - docker pull "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864" - docker pull "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864-distroless" - kind load docker-image --name "$KIND_NAME" "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864" - kind load docker-image --name "$KIND_NAME" "us-docker.pkg.dev/istio-enterprise-private/internal-istio-builds/${image}:1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864-distroless" -done -``` - ```bash -helm upgrade --install istio-base oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/base \ +helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/base \ --namespace istio-system \ --kube-context=${CLUSTER2} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - </istiod \ --namespace istio-system \ --kube-context=${CLUSTER2} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < proxy: clusterDomain: cluster.local - tag: 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 + tag: 1.23.1-solo multiCluster: clusterName: cluster2 profile: ambient @@ -956,15 +903,15 @@ pilot: enabled: true EOF -helm upgrade --install istio-cni oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/cni \ +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ --namespace kube-system \ --kube-context=${CLUSTER2} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < + proxy: 1.23.1-solo profile: ambient cni: ambient: @@ -974,10 +921,10 @@ cni: - kube-system EOF -helm upgrade --install ztunnel oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/ztunnel \ +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ --namespace istio-system \ --kube-context=${CLUSTER2} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < istioNamespace: istio-system multiCluster: clusterName: cluster2 @@ -993,15 +940,15 @@ namespace: istio-system profile: ambient proxy: clusterDomain: cluster.local -tag: 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 +tag: 1.23.1-solo terminationGracePeriodSeconds: 29 variant: distroless EOF -helm upgrade --install istio-ingressgateway- oci://us-docker.pkg.dev/istio-enterprise-private/internal-istio-helm/gateway \ +helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER2} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - </gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER2} \ ---version 1.24-alpha.a2295ca05a358e7c8e9edbbd3f500c8b4eb11864 \ +--version 1.23.1-solo \ --create-namespace \ -f - < [VIDEO 
LINK](https://youtu.be/w1xB-o_gHs0 "Video Link") @@ -1170,6 +1115,12 @@ kubectl --context ${CLUSTER1} create ns httpbin kubectl --context ${CLUSTER1} label namespace httpbin istio.io/dataplane-mode=ambient kubectl apply --context ${CLUSTER1} -f - < [!IMPORTANT] -> Limitations: +> Limitation: > -> * Workloads have to use the default service account. > * Multi-Network traffic is currently not supported by Istio Gateways, Sidecars, and Waypoints. Next, let's send some traffic across the clusters: diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/data/steps/deploy-kind-clusters/deploy-cluster1.sh b/gloo-mesh/core/2-6/ambient-multi-cluster/data/steps/deploy-kind-clusters/deploy-cluster1.sh new file mode 100644 index 0000000000..1c6e42eb5e --- /dev/null +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/data/steps/deploy-kind-clusters/deploy-cluster1.sh @@ -0,0 +1,292 @@ +#!/usr/bin/env bash +set -o errexit + +number="1" +name="cluster1" +region="" +zone="" +twodigits=$(printf "%02d\n" $number) + +kindest_node=${KINDEST_NODE} + +if [ -z "$kindest_node" ]; then + export k8s_version="1.28.0" + + [[ ${k8s_version::1} != 'v' ]] && export k8s_version=v${k8s_version} + kindest_node_ver=$(curl --silent "https://registry.hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" \ + | jq -r '.results | .[] | select(.name==env.k8s_version) | .name+"@"+.digest') + + if [ -z "$kindest_node_ver" ]; then + echo "Incorrect Kubernetes version provided: ${k8s_version}." + exit 1 + fi + kindest_node=kindest/node:${kindest_node_ver} +fi +echo "Using KinD image: ${kindest_node}" + +if [ -z "$3" ]; then + case $name in + cluster1) + region=us-west-1 + ;; + cluster2) + region=us-west-2 + ;; + *) + region=us-east-1 + ;; + esac +fi + +if [ -z "$4" ]; then + case $name in + cluster1) + zone=us-west-1a + ;; + cluster2) + zone=us-west-2a + ;; + *) + zone=us-east-1a + ;; + esac +fi + +if hostname -I 2>/dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null || true +source ./scripts/assert.sh +export MGMT=cluster1 +export CLUSTER1=cluster1 +export CLUSTER2=cluster2 +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh +./scripts/check.sh cluster1 +./scripts/check.sh cluster2 +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Clusters are healthy", () => { + const clusters = ["cluster1", "cluster2"]; + + clusters.forEach(cluster => { + it(`Cluster ${cluster} is healthy`, () => helpers.k8sObjectIsPresent({ context: cluster, namespace: "default", k8sType: "service", k8sObj: "kubernetes" })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-kind-clusters/tests/cluster-healthy.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export GLOO_MESH_VERSION=v2.6.6 +curl -sL https://run.solo.io/meshctl/install | sh - +export PATH=$HOME/.gloo-mesh/bin:$PATH +cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; + +describe("Required environment variables 
should contain value", () => { + afterEach(function(done){ + if(this.currentTest.currentRetry() > 0){ + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } + }); + + it("Context environment variables should not be empty", () => { + expect(process.env.MGMT).not.to.be.empty + expect(process.env.CLUSTER1).not.to.be.empty + expect(process.env.CLUSTER2).not.to.be.empty + }); + + it("Gloo Mesh licence environment variables should not be empty", () => { + expect(process.env.GLOO_MESH_LICENSE_KEY).not.to.be.empty + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${MGMT} create ns gloo-mesh + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --set featureGates.insightsConfiguration=true \ + --version 2.6.6 + +helm upgrade --install gloo-platform-mgmt gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --version 2.6.6 \ + -f -< ./test.js + +const helpers = require('./tests/chai-exec'); + +describe("MGMT server is healthy", () => { + let cluster = process.env.MGMT; + let deployments = ["gloo-mesh-mgmt-server","gloo-mesh-redis","gloo-telemetry-gateway","prometheus-server"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "gloo-mesh", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/check-deployment.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/get-gloo-mesh-mgmt-server-ip.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export ENDPOINT_GLOO_MESH=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-mgmt-server -o jsonpath='{.status.loadBalancer.ingress[0].*}'):9900 +export HOST_GLOO_MESH=$(echo ${ENDPOINT_GLOO_MESH%:*}) +export ENDPOINT_TELEMETRY_GATEWAY=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-telemetry-gateway -o jsonpath='{.status.loadBalancer.ingress[0].*}'):4317 +export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090 +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + 
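+// dns.lookup() goes through the operating system resolver (including
+// /etc/hosts), so this check passes once the address extracted above is
+// resolvable; combined with the --retries=120 used to run this file, the
+// test effectively polls until the management plane endpoint appears.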
+describe("Address '" + process.env.HOST_GLOO_MESH + "' can be resolved in DNS", () => { + it(process.env.HOST_GLOO_MESH + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GLOO_MESH, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${MGMT} -f - < ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER2} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER2} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.6.6 + +helm upgrade --install gloo-platform-agent gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.6.6 \ + -f -< ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); +describe("Cluster registration", () => { + it("cluster1 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster1"); + }); + it("cluster2 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster2"); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/cluster-registration.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +echo "Generating new certificates" +mkdir -p "./certs/${CLUSTER1}" +mkdir -p "./certs/${CLUSTER2}" + +if ! 
[ -x "$(command -v step)" ]; then + echo 'Error: Install the smallstep cli (https://github.com/smallstep/cli)' + exit 1 +fi + +step certificate create root.istio.ca ./certs/root-cert.pem ./certs/root-ca.key \ + --profile root-ca --no-password --insecure --san root.istio.ca \ + --not-after 87600h --kty RSA + +step certificate create $CLUSTER1 \ + ./certs/$CLUSTER1/ca-cert.pem \ + ./certs/$CLUSTER1/ca-key.pem \ + --ca ./certs/root-cert.pem \ + --ca-key ./certs/root-ca.key \ + --profile intermediate-ca \ + --not-after 87600h \ + --no-password \ + --san $CLUSTER1 \ + --kty RSA \ + --insecure + +step certificate create $CLUSTER2 \ + ./certs/$CLUSTER2/ca-cert.pem \ + ./certs/$CLUSTER2/ca-key.pem \ + --ca ./certs/root-cert.pem \ + --ca-key ./certs/root-ca.key \ + --profile intermediate-ca \ + --not-after 87600h \ + --no-password \ + --san $CLUSTER2 \ + --kty RSA \ + --insecure + +cat ./certs/$CLUSTER1/ca-cert.pem ./certs/root-cert.pem > ./certs/$CLUSTER1/cert-chain.pem +cat ./certs/$CLUSTER2/ca-cert.pem ./certs/root-cert.pem > ./certs/$CLUSTER2/cert-chain.pem +kubectl --context="${CLUSTER1}" create namespace istio-system || true +kubectl --context="${CLUSTER1}" create secret generic cacerts -n istio-system \ + --from-file=./certs/$CLUSTER1/ca-cert.pem \ + --from-file=./certs/$CLUSTER1/ca-key.pem \ + --from-file=./certs/root-cert.pem \ + --from-file=./certs/$CLUSTER1/cert-chain.pem + +kubectl --context="${CLUSTER2}" create namespace istio-system || true +kubectl --context="${CLUSTER2}" create secret generic cacerts -n istio-system \ + --from-file=./certs/$CLUSTER2/ca-cert.pem \ + --from-file=./certs/$CLUSTER2/ca-key.pem \ + --from-file=./certs/root-cert.pem \ + --from-file=./certs/$CLUSTER2/cert-chain.pem +curl -L https://istio.io/downloadIstio | sh - + +if [ -d "istio-"*/ ]; then + cd istio-*/ + export PATH=$PWD/bin:$PATH + cd .. 
+fi +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +describe("istio_version is at least 1.23.0", () => { + it("version should be at least 1.23.0", () => { + // Compare the string istio_version to the number 1.23.0 + // example 1.23.0-patch0 is valid, but 1.22.6 is not + let version = "1.23.1"; + let versionParts = version.split('-')[0].split('.'); + let major = parseInt(versionParts[0]); + let minor = parseInt(versionParts[1]); + let patch = parseInt(versionParts[2]); + let minMajor = 1; + let minMinor = 23; + let minPatch = 0; + expect(major).to.be.at.least(minMajor); + if (major === minMajor) { + expect(minor).to.be.at.least(minMinor); + if (minor === minMinor) { + expect(patch).to.be.at.least(minPatch); + } + } + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-istio-helm/tests/istio-version.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns istio-gateways + +kubectl apply --context ${CLUSTER1} -f - </base \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - </istiod \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < + proxy: + clusterDomain: cluster.local + tag: 1.23.1-solo + multiCluster: + clusterName: cluster1 +profile: ambient +istio_cni: + enabled: true +meshConfig: + accessLogFile: /dev/stdout + defaultConfig: + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + trustDomain: cluster.local +pilot: + enabled: true + env: + PILOT_ENABLE_IP_AUTOALLOCATE: "true" + PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES: "false" + PILOT_SKIP_VALIDATE_TRUST_DOMAIN: "true" + podLabels: + hack: eastwest + platforms: + peering: + enabled: true +EOF + +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ +--namespace kube-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < + proxy: 1.23.1-solo +profile: ambient +cni: + ambient: + dnsCapture: true + excludeNamespaces: + - istio-system + - kube-system +EOF + +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < +istioNamespace: istio-system +multiCluster: + clusterName: cluster1 +namespace: istio-system +profile: ambient +proxy: + clusterDomain: cluster.local +tag: 1.23.1-solo +terminationGracePeriodSeconds: 29 +variant: distroless +EOF + +helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - </gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER1} apply -f -; } +kubectl --context ${CLUSTER2} get crd gateways.gateway.networking.k8s.io &> /dev/null || \ + { kubectl 
kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER2} apply -f -; } +helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/base \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - </istiod \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < + proxy: + clusterDomain: cluster.local + tag: 1.23.1-solo + multiCluster: + clusterName: cluster2 +profile: ambient +istio_cni: + enabled: true +meshConfig: + accessLogFile: /dev/stdout + defaultConfig: + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + trustDomain: cluster.local +pilot: + enabled: true + env: + PILOT_ENABLE_IP_AUTOALLOCATE: "true" + PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES: "false" + PILOT_SKIP_VALIDATE_TRUST_DOMAIN: "true" + podLabels: + hack: eastwest + platforms: + peering: + enabled: true +EOF + +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ +--namespace kube-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < + proxy: 1.23.1-solo +profile: ambient +cni: + ambient: + dnsCapture: true + excludeNamespaces: + - istio-system + - kube-system +EOF + +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < +istioNamespace: istio-system +multiCluster: + clusterName: cluster2 +namespace: istio-system +profile: ambient +proxy: + clusterDomain: cluster.local +tag: 1.23.1-solo +terminationGracePeriodSeconds: 29 +variant: distroless +EOF + +helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - </gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} \ +--version 1.23.1-solo \ +--create-namespace \ +-f - < /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER1} apply -f -; } +kubectl --context ${CLUSTER2} get crd gateways.gateway.networking.k8s.io &> /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER2} apply -f -; } +cat <<'EOF' > ./test.js + +const helpers = require('./tests/chai-exec'); + +const chaiExec = require("@jsdevtools/chai-exec"); +const helpersHttp = require('./tests/chai-http'); +const chai = require("chai"); +const expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("Checking Istio installation", function() { + it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: 
"app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER1, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER1 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER2, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER2 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-istio-helm/tests/istio-ready.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +timeout 2m bash -c "until [[ \$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o json | jq '.items[0].status.loadBalancer | length') -gt 0 ]]; do + sleep 1 +done" +export HOST_GW_CLUSTER1="$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +export HOST_GW_CLUSTER2="$(kubectl --context ${CLUSTER2} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER1 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER1 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER1, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./default/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER2 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER2 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER2, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./default/tests/can-resolve.test.js.liquid" 
+timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns httpbin +kubectl --context ${CLUSTER1} label namespace httpbin istio.io/dataplane-mode=ambient +kubectl apply --context ${CLUSTER1} -f - </dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("httpbin app", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "httpbin", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/httpbin/deploy-httpbin/tests/check-httpbin.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER2} create ns httpbin +kubectl --context ${CLUSTER2} label namespace httpbin istio.io/dataplane-mode=ambient +kubectl apply --context ${CLUSTER2} -f - </dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl apply --context ${CLUSTER2} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("httpbin app", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "httpbin", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/httpbin/deploy-httpbin/tests/check-httpbin.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns clients + +kubectl apply --context ${CLUSTER1} -f - </dev/null +do + sleep 1 + echo -n . 
+done" +echo +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("client apps", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh-with-sidecar", "in-ambient"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "clients", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/clients/deploy-clients/tests/check-clients.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context $CLUSTER1 label namespace istio-system topology.istio.io/network=$CLUSTER1 +kubectl --context $CLUSTER2 label namespace istio-system topology.istio.io/network=$CLUSTER2 + cat < ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); +const helpers = require('./tests/chai-exec'); + + +describe("ensure traffic goes to workloads in both clusters", () => { + it('should have two origins', async () => { + const origins = new Set(); + for (let i = 0; i < 10; i++) { + const command = await helpers.curlInDeployment({ + curlCommand: 'curl in-ambient.httpbin.global:8000/get', + deploymentName: 'in-ambient', + namespace: 'clients', + context: `${process.env.CLUSTER1}` + }); + const origin = JSON.parse(command).origin; + origins.add(origin); + } + expect(origins.size).to.equal(2); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/link-clusters/tests/check-cross-cluster-traffic.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/configure-domain-rewrite.sh b/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/configure-domain-rewrite.sh index be6dbd6d8b..d6e684c9da 100755 --- a/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/configure-domain-rewrite.sh +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/configure-domain-rewrite.sh @@ -90,4 +90,4 @@ done # If the loop exits, it means the check failed consistently for 1 minute echo "DNS rewrite rule verification failed." 
-exit 1 +exit 1 \ No newline at end of file diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/register-domain.sh b/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/register-domain.sh index f9084487e8..1cb84cd86a 100755 --- a/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/register-domain.sh +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/scripts/register-domain.sh @@ -14,7 +14,9 @@ hosts_file="/etc/hosts" # Function to check if the input is a valid IP address is_ip() { if [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - return 0 # 0 = true + return 0 # 0 = true - valid IPv4 address + elif [[ $1 =~ ^[0-9a-f]+[:]+[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9]*$ ]]; then + return 0 # 0 = true - valid IPv6 address else return 1 # 1 = false fi @@ -38,14 +40,15 @@ else fi # Check if the entry already exists -if grep -q "$hostname" "$hosts_file"; then +if grep -q "$hostname\$" "$hosts_file"; then # Update the existing entry with the new IP tempfile=$(mktemp) - sed "s/^.*$hostname/$new_ip $hostname/" "$hosts_file" > "$tempfile" + sed "s/^.*$hostname\$/$new_ip $hostname/" "$hosts_file" > "$tempfile" sudo cp "$tempfile" "$hosts_file" + rm "$tempfile" echo "Updated $hostname in $hosts_file with new IP: $new_ip" else # Add a new entry if it doesn't exist echo "$new_ip $hostname" | sudo tee -a "$hosts_file" > /dev/null echo "Added $hostname to $hosts_file with IP: $new_ip" -fi \ No newline at end of file +fi diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-exec.js b/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-exec.js index 67ba62f095..020262437f 100644 --- a/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-exec.js +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-exec.js @@ -139,7 +139,11 @@ global = { }, k8sObjectIsPresent: ({ context, namespace, k8sType, k8sObj }) => { - let command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + // covers both namespace scoped and cluster scoped objects + let command = "kubectl --context " + context + " get " + k8sType + " " + k8sObj + " -o name"; + if (namespace) { + command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + } debugLog(`Executing command: ${command}`); let cli = chaiExec(command); @@ -176,7 +180,6 @@ global = { debugLog(`Command output (stdout): ${cli.stdout}`); return cli.stdout; }, - curlInPod: ({ curlCommand, podName, namespace }) => { debugLog(`Executing curl command: ${curlCommand} on pod: ${podName} in namespace: ${namespace}`); const cli = chaiExec(curlCommand); diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-http.js b/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-http.js index 67f43db003..92bf579690 100644 --- a/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-http.js +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/tests/chai-http.js @@ -25,7 +25,30 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); + }); + }, + + checkURLWithIP: ({ ip, host, protocol = "http", path = "", headers = [], certFile = '', keyFile = '', retCode }) => { + debugLog(`Checking URL with IP: ${ip}, Host: ${host}, Path: ${path} with expected return code: ${retCode}`); + + let cert = certFile ? fs.readFileSync(certFile) : ''; + let key = keyFile ? 
fs.readFileSync(keyFile) : ''; + + let url = `${protocol}://${ip}`; + + // Use chai-http to make a request to the IP address, but set the Host header + let request = chai.request(url).head(path).redirects(0).cert(cert).key(key).set('Host', host); + + debugLog(`Setting headers: ${JSON.stringify(headers)}`); + headers.forEach(header => request.set(header.key, header.value)); + + return request + .send() + .then(async function (res) { + debugLog(`Response status code: ${res.status}`); + debugLog(`Response ${JSON.stringify(res)}`); + expect(res).to.have.property('status', retCode); }); }, @@ -124,7 +147,7 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); }); } }; diff --git a/gloo-mesh/core/2-6/ambient-multi-cluster/tests/proxies-changes.test.js.liquid b/gloo-mesh/core/2-6/ambient-multi-cluster/tests/proxies-changes.test.js.liquid new file mode 100644 index 0000000000..1934ea13b6 --- /dev/null +++ b/gloo-mesh/core/2-6/ambient-multi-cluster/tests/proxies-changes.test.js.liquid @@ -0,0 +1,58 @@ +{%- assign version_1_18_or_after = "1.18.0" | minimumGlooGatewayVersion %} +const { execSync } = require('child_process'); +const { expect } = require('chai'); +const { diff } = require('jest-diff'); + +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +describe('Gloo snapshot stability test', function() { + let contextName = process.env.{{ context | default: "CLUSTER1" }}; + let delaySeconds = {{ delay | default: 5 }}; + + let firstSnapshot; + + it('should retrieve initial snapshot', function() { + const output = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + + try { + firstSnapshot = JSON.parse(output); + } catch (err) { + throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message); + } + expect(firstSnapshot).to.be.an('object'); + }); + + it('should not change after the given delay', async function() { + await delay(delaySeconds * 1000); + + let secondSnapshot; + try { + const output2 = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + secondSnapshot = JSON.parse(output2); + } catch (err) { + throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message); + } + + const firstJson = JSON.stringify(firstSnapshot, null, 2); + const secondJson = JSON.stringify(secondSnapshot, null, 2); + + // Show only 2 lines of context around each change + const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false }); + + if (! diffOutput.includes("Compared values have no visual difference.")) { + console.error('Differences found between snapshots:\n' + diffOutput); + throw new Error('Snapshots differ after the delay.'); + } else { + console.log('No differences found. 
The snapshots are stable.'); + } + }); +}); + From a8e22e8a9eb12953c36332f04c84438869e5f521 Mon Sep 17 00:00:00 2001 From: soloio-bot <> Date: Fri, 3 Jan 2025 17:23:38 +0000 Subject: [PATCH 03/34] Update from https://github.com/solo-io/procgen/commit/b0fcf769a8ed8b42653f7aeb190716fb7add3277 --- gloo-mesh/core/2-6/default/README.md | 42 +- .../deploy-kind-clusters/deploy-cluster1.sh | 289 +++ .../deploy-kind-clusters/deploy-cluster2.sh | 289 +++ .../steps/deploy-kind-clusters/deploy-mgmt.sh | 289 +++ gloo-mesh/core/2-6/default/package.json | 44 + gloo-mesh/core/2-6/default/run.sh | 1659 +++++++++++++++++ .../scripts/configure-domain-rewrite.sh | 2 +- .../2-6/default/scripts/register-domain.sh | 11 +- gloo-mesh/core/2-6/default/tests/chai-exec.js | 7 +- gloo-mesh/core/2-6/default/tests/chai-http.js | 27 +- .../tests/proxies-changes.test.js.liquid | 58 + 11 files changed, 2679 insertions(+), 38 deletions(-) create mode 100644 gloo-mesh/core/2-6/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh create mode 100644 gloo-mesh/core/2-6/default/data/steps/deploy-kind-clusters/deploy-cluster2.sh create mode 100644 gloo-mesh/core/2-6/default/data/steps/deploy-kind-clusters/deploy-mgmt.sh create mode 100644 gloo-mesh/core/2-6/default/package.json create mode 100644 gloo-mesh/core/2-6/default/run.sh create mode 100644 gloo-mesh/core/2-6/default/tests/proxies-changes.test.js.liquid diff --git a/gloo-mesh/core/2-6/default/README.md b/gloo-mesh/core/2-6/default/README.md index a512e1baa5..58635541a7 100644 --- a/gloo-mesh/core/2-6/default/README.md +++ b/gloo-mesh/core/2-6/default/README.md @@ -15,7 +15,7 @@ source ./scripts/assert.sh ## Table of Contents * [Introduction](#introduction) -* [Lab 1 - Deploy KinD clusters](#lab-1---deploy-kind-clusters-) +* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-) * [Lab 2 - Deploy and register Gloo Mesh](#lab-2---deploy-and-register-gloo-mesh-) * [Lab 3 - Deploy Istio using Gloo Mesh Lifecycle Manager](#lab-3---deploy-istio-using-gloo-mesh-lifecycle-manager-) * [Lab 4 - Deploy the Bookinfo demo app](#lab-4---deploy-the-bookinfo-demo-app-) @@ -68,7 +68,7 @@ You can find more information about Gloo Mesh Core in the official documentation -## Lab 1 - Deploy KinD clusters +## Lab 1 - Deploy KinD Cluster(s) Clone this repository and go to the directory where this `README.md` file is. @@ -81,14 +81,13 @@ export CLUSTER1=cluster1 export CLUSTER2=cluster2 ``` -Run the following commands to deploy three Kubernetes clusters using [Kind](https://kind.sigs.k8s.io/): +Deploy the KinD clusters: ```bash -./scripts/deploy-aws.sh 1 mgmt -./scripts/deploy-aws.sh 2 cluster1 us-west us-west-1 -./scripts/deploy-aws.sh 3 cluster2 us-west us-west-2 +bash ./data/steps/deploy-kind-clusters/deploy-mgmt.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh ``` - Then run the following commands to wait for all the Pods to be ready: ```bash @@ -99,27 +98,8 @@ Then run the following commands to wait for all the Pods to be ready: **Note:** If you run the `check.sh` script immediately after the `deploy.sh` script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again. 
-Once the `check.sh` script completes, when you execute the `kubectl get pods -A` command, you should see the following: - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system calico-kube-controllers-59d85c5c84-sbk4k 1/1 Running 0 4h26m -kube-system calico-node-przxs 1/1 Running 0 4h26m -kube-system coredns-6955765f44-ln8f5 1/1 Running 0 4h26m -kube-system coredns-6955765f44-s7xxx 1/1 Running 0 4h26m -kube-system etcd-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-apiserver-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-controller-manager-cluster1-control-plane1/1 Running 0 4h27m -kube-system kube-proxy-ksvzw 1/1 Running 0 4h26m -kube-system kube-scheduler-cluster1-control-plane 1/1 Running 0 4h27m -local-path-storage local-path-provisioner-58f6947c7-lfmdx 1/1 Running 0 4h26m -metallb-system controller-5c9894b5cd-cn9x2 1/1 Running 0 4h26m -metallb-system speaker-d7jkp 1/1 Running 0 4h26m -``` - -**Note:** The CNI pods might be different, depending on which CNI you have deployed. - -You can see that your currently connected to this cluster by executing the `kubectl config get-contexts` command: +Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state. + You can see that your currently connected to this cluster by executing the `kubectl config get-contexts` command: ``` CURRENT NAME CLUSTER AUTHINFO NAMESPACE @@ -138,7 +118,8 @@ cat <<'EOF' > ./test.js const helpers = require('./tests/chai-exec'); describe("Clusters are healthy", () => { - const clusters = [process.env.MGMT, process.env.CLUSTER1, process.env.CLUSTER2]; + const clusters = ["mgmt", "cluster1", "cluster2"]; + clusters.forEach(cluster => { it(`Cluster ${cluster} is healthy`, () => helpers.k8sObjectIsPresent({ context: cluster, namespace: "default", k8sType: "service", k8sObj: "kubernetes" })); }); @@ -150,6 +131,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || + ## Lab 2 - Deploy and register Gloo Mesh [VIDEO LINK](https://youtu.be/djfFiepK4GY "Video Link") @@ -190,6 +172,7 @@ EOF echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } --> + Run the following commands to deploy the Gloo Mesh management plane: ```bash @@ -490,6 +473,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || + ## Lab 3 - Deploy Istio using Gloo Mesh Lifecycle Manager [VIDEO LINK](https://youtu.be/f76-KOEjqHs "Video Link") diff --git a/gloo-mesh/core/2-6/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh b/gloo-mesh/core/2-6/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh new file mode 100644 index 0000000000..31b0806b9b --- /dev/null +++ b/gloo-mesh/core/2-6/default/data/steps/deploy-kind-clusters/deploy-cluster1.sh @@ -0,0 +1,289 @@ +#!/usr/bin/env bash +set -o errexit + +number="2" +name="cluster1" +region="" +zone="" +twodigits=$(printf "%02d\n" $number) + +kindest_node=${KINDEST_NODE} + +if [ -z "$kindest_node" ]; then + export k8s_version="1.28.0" + + [[ ${k8s_version::1} != 'v' ]] && export k8s_version=v${k8s_version} + kindest_node_ver=$(curl --silent "https://registry.hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" \ + | jq -r '.results | .[] | select(.name==env.k8s_version) | .name+"@"+.digest') + + if [ -z 
"$kindest_node_ver" ]; then + echo "Incorrect Kubernetes version provided: ${k8s_version}." + exit 1 + fi + kindest_node=kindest/node:${kindest_node_ver} +fi +echo "Using KinD image: ${kindest_node}" + +if [ -z "$3" ]; then + case $name in + cluster1) + region=us-west-1 + ;; + cluster2) + region=us-west-2 + ;; + *) + region=us-east-1 + ;; + esac +fi + +if [ -z "$4" ]; then + case $name in + cluster1) + zone=us-west-1a + ;; + cluster2) + zone=us-west-2a + ;; + *) + zone=us-east-1a + ;; + esac +fi + +if hostname -I 2>/dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI 
+KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null || true +source ./scripts/assert.sh +export MGMT=mgmt +export CLUSTER1=cluster1 +export CLUSTER2=cluster2 +bash ./data/steps/deploy-kind-clusters/deploy-mgmt.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh +./scripts/check.sh mgmt +./scripts/check.sh cluster1 +./scripts/check.sh cluster2 +kubectl config use-context ${MGMT} +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Clusters are healthy", () => { + const clusters = ["mgmt", "cluster1", "cluster2"]; + + clusters.forEach(cluster => { + it(`Cluster ${cluster} is healthy`, () => helpers.k8sObjectIsPresent({ context: cluster, namespace: "default", k8sType: "service", k8sObj: "kubernetes" })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-kind-clusters/tests/cluster-healthy.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export GLOO_MESH_VERSION=v2.6.6 +curl -sL https://run.solo.io/meshctl/install | sh - +export PATH=$HOME/.gloo-mesh/bin:$PATH +cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; + +describe("Required environment variables should contain value", () => { + afterEach(function(done){ + 
if(this.currentTest.currentRetry() > 0){ + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } + }); + + it("Context environment variables should not be empty", () => { + expect(process.env.MGMT).not.to.be.empty + expect(process.env.CLUSTER1).not.to.be.empty + expect(process.env.CLUSTER2).not.to.be.empty + }); + + it("Gloo Mesh licence environment variables should not be empty", () => { + expect(process.env.GLOO_MESH_LICENSE_KEY).not.to.be.empty + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${MGMT} create ns gloo-mesh + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --set featureGates.insightsConfiguration=true \ + --version 2.6.6 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --version 2.6.6 \ + -f -< ./test.js + +const helpers = require('./tests/chai-exec'); + +describe("MGMT server is healthy", () => { + let cluster = process.env.MGMT; + let deployments = ["gloo-mesh-mgmt-server","gloo-mesh-redis","gloo-telemetry-gateway","prometheus-server"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "gloo-mesh", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/check-deployment.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/get-gloo-mesh-mgmt-server-ip.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export ENDPOINT_GLOO_MESH=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-mgmt-server -o jsonpath='{.status.loadBalancer.ingress[0].*}'):9900 +export HOST_GLOO_MESH=$(echo ${ENDPOINT_GLOO_MESH%:*}) +export ENDPOINT_TELEMETRY_GATEWAY=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-telemetry-gateway -o jsonpath='{.status.loadBalancer.ingress[0].*}'):4317 +export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090 +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GLOO_MESH + "' can be resolved in DNS", () 
=> { + it(process.env.HOST_GLOO_MESH + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GLOO_MESH, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${MGMT} -f - < ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER1} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER1} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.6.6 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.6.6 \ + -f -< ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER2} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER2} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.6.6 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.6.6 \ + -f -< ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); +describe("Cluster registration", () => { + it("cluster1 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster1"); + }); + it("cluster2 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster2"); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/cluster-registration.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 
120000; exit 1; } +curl -L https://istio.io/downloadIstio | sh - + +if [ -d "istio-"*/ ]; then + cd istio-*/ + export PATH=$PWD/bin:$PATH + cd .. +fi +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/istio-lifecycle-manager-install/tests/istio-version.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns istio-gateways + +kubectl apply --context ${CLUSTER1} -f - < ./test.js + +const helpers = require('./tests/chai-exec'); + +const chaiExec = require("@jsdevtools/chai-exec"); +const helpersHttp = require('./tests/chai-http'); +const chai = require("chai"); +const expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("Checking Istio installation", function() { + it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER1, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER1 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER2, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER2 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/istio-lifecycle-manager-install/tests/istio-ready.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +timeout 2m bash -c "until [[ \$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o json | jq 
'.items[0].status.loadBalancer | length') -gt 0 ]]; do + sleep 1 +done" +export HOST_GW_CLUSTER1="$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +export HOST_GW_CLUSTER2="$(kubectl --context ${CLUSTER2} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER1 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER1 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER1, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER2 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER2 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER2, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns bookinfo-frontends +kubectl --context ${CLUSTER1} create ns bookinfo-backends +kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio.io/rev=1-23 --overwrite +kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio.io/rev=1-23 --overwrite + +# Deploy the frontend bookinfo service in the bookinfo-frontends namespace +kubectl --context ${CLUSTER1} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml + +# Deploy the backend bookinfo services in the bookinfo-backends namespace for all versions less than v3 +kubectl --context ${CLUSTER1} -n bookinfo-backends apply \ + -f data/steps/deploy-bookinfo/details-v1.yaml \ + -f data/steps/deploy-bookinfo/ratings-v1.yaml \ + -f data/steps/deploy-bookinfo/reviews-v1-v2.yaml + +# Update the reviews service to display where it is coming from +kubectl --context ${CLUSTER1} -n bookinfo-backends set env deploy/reviews-v1 CLUSTER_NAME=${CLUSTER1} +kubectl --context ${CLUSTER1} -n bookinfo-backends set env deploy/reviews-v2 CLUSTER_NAME=${CLUSTER1} +echo -n Waiting for bookinfo pods to be ready... 
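# Note: the readiness loop below sums '.status.readyReplicas' across every Deployment
# in each namespace via jq ('[.items[].status.readyReplicas] | add') and keeps polling
# until the frontends namespace reports 1 ready replica (productpage-v1) and the
# backends namespace reports 4 (details-v1, ratings-v1, reviews-v1 and reviews-v2).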
+timeout -v 5m bash -c " +until [[ \$(kubectl --context ${CLUSTER1} -n bookinfo-frontends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 1 && \\ + \$(kubectl --context ${CLUSTER1} -n bookinfo-backends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 4 ]] 2>/dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl --context ${CLUSTER2} create ns bookinfo-frontends +kubectl --context ${CLUSTER2} create ns bookinfo-backends +kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio.io/rev=1-23 --overwrite +kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio.io/rev=1-23 --overwrite + +# Deploy the frontend bookinfo service in the bookinfo-frontends namespace +kubectl --context ${CLUSTER2} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml +# Deploy the backend bookinfo services in the bookinfo-backends namespace for all versions +kubectl --context ${CLUSTER2} -n bookinfo-backends apply \ + -f data/steps/deploy-bookinfo/details-v1.yaml \ + -f data/steps/deploy-bookinfo/ratings-v1.yaml \ + -f data/steps/deploy-bookinfo/reviews-v1-v2.yaml \ + -f data/steps/deploy-bookinfo/reviews-v3.yaml +# Update the reviews service to display where it is coming from +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v1 CLUSTER_NAME=${CLUSTER2} +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v2 CLUSTER_NAME=${CLUSTER2} +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v3 CLUSTER_NAME=${CLUSTER2} + +echo -n Waiting for bookinfo pods to be ready... +timeout -v 5m bash -c " +until [[ \$(kubectl --context ${CLUSTER2} -n bookinfo-frontends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 1 && \\ + \$(kubectl --context ${CLUSTER2} -n bookinfo-backends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 5 ]] 2>/dev/null +do + sleep 1 + echo -n . 
+done" +echo +kubectl --context ${CLUSTER2} -n bookinfo-frontends get pods && kubectl --context ${CLUSTER2} -n bookinfo-backends get pods +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Bookinfo app", () => { + let cluster = process.env.CLUSTER1 + let deployments = ["productpage-v1"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-frontends", k8sObj: deploy })); + }); + deployments = ["ratings-v1", "details-v1", "reviews-v1", "reviews-v2"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-backends", k8sObj: deploy })); + }); + cluster = process.env.CLUSTER2 + deployments = ["productpage-v1"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-frontends", k8sObj: deploy })); + }); + deployments = ["ratings-v1", "details-v1", "reviews-v1", "reviews-v2", "reviews-v3"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-backends", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/deploy-bookinfo/tests/check-bookinfo.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns httpbin +kubectl apply --context ${CLUSTER1} -f - </dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl --context ${CLUSTER1} -n httpbin get pods +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("httpbin app", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "httpbin", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/httpbin/deploy-httpbin/tests/check-httpbin.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is available (HTTP)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `http://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/productpage-available.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout tls.key -out tls.crt -subj "/CN=*" +kubectl --context ${CLUSTER1} -n istio-gateways create secret generic tls-secret \ +--from-file=tls.key=tls.key \ +--from-file=tls.crt=tls.crt + +kubectl --context ${CLUSTER2} -n istio-gateways create secret generic tls-secret \ +--from-file=tls.key=tls.key \ +--from-file=tls.crt=tls.crt +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = 
require('./tests/chai-http'); + +describe("productpage is available (HTTPS)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/productpage-available-secure.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Otel metrics", () => { + it("cluster1 is sending metrics to telemetryGateway", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app.kubernetes.io/name=prometheus -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9090/api/v1/query?query=istio_requests_total" }).replaceAll("'", ""); + expect(command).to.contain("cluster\":\"cluster1"); + }); +}); + + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/otel-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=150 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-http'); +const puppeteer = require('puppeteer'); +const chai = require('chai'); +const expect = chai.expect; +const GraphPage = require('./tests/pages/gloo-ui/graph-page'); +const { recognizeTextFromScreenshot } = require('./tests/utils/image-ocr-processor'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("graph page", function () { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. 
+ const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let page; + let graphPage; + + beforeEach(async function () { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + page = await browser.newPage(); + graphPage = new GraphPage(page); + await Promise.all(Array.from({ length: 20 }, () => + helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 }))); + }); + + afterEach(async function () { + await browser.close(); + }); + + it("should show ingress gateway and product page", async function () { + await graphPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/graph`); + + // Select the clusters and namespaces so that the graph shows + await graphPage.selectClusters(['cluster1', 'cluster2']); + await graphPage.selectNamespaces(['istio-gateways', 'bookinfo-backends', 'bookinfo-frontends']); + // Disabling Cilium nodes due to this issue: https://github.com/solo-io/gloo-mesh-enterprise/issues/18623 + await graphPage.toggleLayoutSettings(); + await graphPage.disableCiliumNodes(); + await graphPage.toggleLayoutSettings(); + + // Capture a screenshot of the canvas and run text recognition + await graphPage.fullscreenGraph(); + await graphPage.centerGraph(); + const screenshotPath = 'ui-test-data/canvas.png'; + await graphPage.captureCanvasScreenshot(screenshotPath); + + const recognizedTexts = await recognizeTextFromScreenshot( + screenshotPath, + ["istio-ingressgateway", "productpage-v1", "details-v1", "ratings-v1", "reviews-v1", "reviews-v2"]); + + const flattenedRecognizedText = recognizedTexts.join(",").replace(/\n/g, ''); + console.log("Flattened recognized text:", flattenedRecognizedText); + + // Validate recognized texts + expect(flattenedRecognizedText).to.include("istio-ingressgateway"); + expect(flattenedRecognizedText).to.include("productpage-v1"); + expect(flattenedRecognizedText).to.include("details-v1"); + expect(flattenedRecognizedText).to.include("ratings-v1"); + expect(flattenedRecognizedText).to.include("reviews-v1"); + expect(flattenedRecognizedText).to.include("reviews-v2"); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/graph-shows-traffic.test.js.liquid" +timeout --signal=INT 7m mocha ./test.js --timeout 120000 --retries=3 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const helpersHttp = require('./tests/chai-http'); +const InsightsPage = require('./tests/pages/insights-page'); +const constants = require('./tests/pages/constants'); +const puppeteer = require('puppeteer'); +var chai = require('chai'); +var expect = chai.expect; +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Insights UI", function() { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. 
+  const currentTimeout = this.timeout();
+  this.timeout(Math.max(currentTimeout, 30000));
+
+  let browser;
+  let insightsPage;
+
+  // Use Mocha's 'beforeEach' hook to set up Puppeteer before each test
+  beforeEach(async function() {
+    browser = await puppeteer.launch({
+      headless: "new",
+      slowMo: 40,
+      ignoreHTTPSErrors: true,
+      args: ['--no-sandbox', '--disable-setuid-sandbox'],
+    });
+    browser = enhanceBrowser(browser, this.currentTest.title);
+    let page = await browser.newPage();
+    insightsPage = new InsightsPage(page);
+  });
+
+  // Use Mocha's 'afterEach' hook to close Puppeteer after each test
+  afterEach(async function() {
+    await browser.close();
+  });
+
+  it("should display BP0001 warning with text 'Globally scoped routing'", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    await insightsPage.selectClusters(['cluster1', 'cluster2']);
+    await insightsPage.selectInsightTypes([constants.InsightType.BP]);
+    const data = await insightsPage.getTableDataRows()
+    expect(data.some(item => item.includes("Globally scoped routing"))).to.be.true;
+  });
+
+  it("should have quick resource state filters", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    const healthy = await insightsPage.getHealthyResourcesCount();
+    const warning = await insightsPage.getWarningResourcesCount();
+    const error = await insightsPage.getErrorResourcesCount();
+    expect(healthy).to.be.greaterThan(0);
+    expect(warning).to.be.greaterThan(0);
+    expect(error).to.be.a('number');
+  });
+});
+
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-ui-BP0001.test.js.liquid"
+timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+cat <<'EOF' > ./test.js
+var chai = require('chai');
+var expect = chai.expect;
+const helpers = require('./tests/chai-exec');
+
+describe("Insight generation", () => {
+  it("Insight BP0002 has been triggered in the source (MGMT)", () => {
+    helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` });
+    helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx:1.25.3 --context " + process.env.MGMT });
+    command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", "");
+    const regex = /gloo_mesh_insights{.*BP0002.*} 1/;
+    const match = command.match(regex);
+    expect(match).to.not.be.null;
+  });
+
+  it("Insight BP0002 has been triggered in PROMETHEUS", () => {
+    helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` });
+    command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", "");
+    let result = JSON.parse(command);
+    let active = false;
+    result.data.result.forEach(item => {
+      if(item.metric.code == "BP0002" && item.value[1] > 0) {
+        active = true
+      }
+    });
+    expect(active).to.be.true;
+  });
+});
+EOF
+echo "executing test
dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${MGMT} -f - < ./test.js +const helpersHttp = require('./tests/chai-http'); +const InsightsPage = require('./tests/pages/insights-page'); +const constants = require('./tests/pages/constants'); +const puppeteer = require('puppeteer'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); +var chai = require('chai'); +var expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Insights UI", function() { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. + const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let insightsPage; + + // Use Mocha's 'before' hook to set up Puppeteer + beforeEach(async function() { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + let page = await browser.newPage(); + await page.setViewport({ width: 1500, height: 1000 }); + insightsPage = new InsightsPage(page); + }); + + // Use Mocha's 'after' hook to close Puppeteer + afterEach(async function() { + await browser.close(); + }); + + it("should not display BP0002 in the UI", async () => { + await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`); + await insightsPage.selectClusters(['cluster1', 'cluster2']); + await insightsPage.selectInsightTypes([constants.InsightType.BP]); + const data = await insightsPage.getTableDataRows() + expect(data.some(item => item.includes("is not namespaced"))).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-not-ui-BP0002.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpersHttp = require('./tests/chai-http'); +const InsightsPage = require('./tests/pages/insights-page'); +const constants = require('./tests/pages/constants'); +const puppeteer = require('puppeteer'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); +var chai = require('chai'); +var expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Insights UI", function() { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. 
+ const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let insightsPage; + + // Use Mocha's 'before' hook to set up Puppeteer + beforeEach(async function() { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + let page = await browser.newPage(); + await page.setViewport({ width: 1500, height: 1000 }); + insightsPage = new InsightsPage(page); + }); + + // Use Mocha's 'after' hook to close Puppeteer + afterEach(async function() { + await browser.close(); + }); + + it("should not display BP0001 in the UI", async () => { + await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`); + await insightsPage.selectClusters(['cluster1', 'cluster2']); + await insightsPage.selectInsightTypes([constants.InsightType.BP]); + const data = await insightsPage.getTableDataRows() + expect(data.some(item => item.includes("is not namespaced"))).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-not-ui-BP0001.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight CFG0001 has been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*CFG0001.*} 1/; + const match = command.match(regex); + expect(match).to.not.be.null; + }); + + it("Insight CFG0001 has been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "CFG0001" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-config/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + 
it("Insight CFG0001 has not been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*CFG0001.*} 1/; + const match = command.match(regex); + expect(match).to.be.null; + }); + + it("Insight CFG0001 has not been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "CFG0001" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-config/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends delete virtualservice reviews +kubectl --context ${CLUSTER1} -n bookinfo-backends delete destinationrule reviews +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight SEC0008 has been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*SEC0008.*} 1/; + const match = command.match(regex); + expect(match).to.not.be.null; + }); + + it("Insight SEC0008 has been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "SEC0008" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); 
+EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-security/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight SEC0008 has not been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*SEC0008.*} 1/; + const match = command.match(regex); + expect(match).to.be.null; + }); + + it("Insight SEC0008 has not been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "SEC0008" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-security/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends delete authorizationpolicy reviews +kubectl --context ${CLUSTER1} -n istio-system delete peerauthentication default diff --git a/gloo-mesh/core/2-6/default/scripts/configure-domain-rewrite.sh b/gloo-mesh/core/2-6/default/scripts/configure-domain-rewrite.sh index be6dbd6d8b..d6e684c9da 100755 --- a/gloo-mesh/core/2-6/default/scripts/configure-domain-rewrite.sh +++ b/gloo-mesh/core/2-6/default/scripts/configure-domain-rewrite.sh @@ -90,4 +90,4 @@ done # If the loop exits, it means the check failed consistently for 1 minute echo "DNS rewrite rule verification failed." 
-exit 1 +exit 1 \ No newline at end of file diff --git a/gloo-mesh/core/2-6/default/scripts/register-domain.sh b/gloo-mesh/core/2-6/default/scripts/register-domain.sh index f9084487e8..1cb84cd86a 100755 --- a/gloo-mesh/core/2-6/default/scripts/register-domain.sh +++ b/gloo-mesh/core/2-6/default/scripts/register-domain.sh @@ -14,7 +14,9 @@ hosts_file="/etc/hosts" # Function to check if the input is a valid IP address is_ip() { if [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - return 0 # 0 = true + return 0 # 0 = true - valid IPv4 address + elif [[ $1 =~ ^[0-9a-f]+[:]+[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9]*$ ]]; then + return 0 # 0 = true - valid IPv6 address else return 1 # 1 = false fi @@ -38,14 +40,15 @@ else fi # Check if the entry already exists -if grep -q "$hostname" "$hosts_file"; then +if grep -q "$hostname\$" "$hosts_file"; then # Update the existing entry with the new IP tempfile=$(mktemp) - sed "s/^.*$hostname/$new_ip $hostname/" "$hosts_file" > "$tempfile" + sed "s/^.*$hostname\$/$new_ip $hostname/" "$hosts_file" > "$tempfile" sudo cp "$tempfile" "$hosts_file" + rm "$tempfile" echo "Updated $hostname in $hosts_file with new IP: $new_ip" else # Add a new entry if it doesn't exist echo "$new_ip $hostname" | sudo tee -a "$hosts_file" > /dev/null echo "Added $hostname to $hosts_file with IP: $new_ip" -fi \ No newline at end of file +fi diff --git a/gloo-mesh/core/2-6/default/tests/chai-exec.js b/gloo-mesh/core/2-6/default/tests/chai-exec.js index 67ba62f095..020262437f 100644 --- a/gloo-mesh/core/2-6/default/tests/chai-exec.js +++ b/gloo-mesh/core/2-6/default/tests/chai-exec.js @@ -139,7 +139,11 @@ global = { }, k8sObjectIsPresent: ({ context, namespace, k8sType, k8sObj }) => { - let command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + // covers both namespace scoped and cluster scoped objects + let command = "kubectl --context " + context + " get " + k8sType + " " + k8sObj + " -o name"; + if (namespace) { + command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + } debugLog(`Executing command: ${command}`); let cli = chaiExec(command); @@ -176,7 +180,6 @@ global = { debugLog(`Command output (stdout): ${cli.stdout}`); return cli.stdout; }, - curlInPod: ({ curlCommand, podName, namespace }) => { debugLog(`Executing curl command: ${curlCommand} on pod: ${podName} in namespace: ${namespace}`); const cli = chaiExec(curlCommand); diff --git a/gloo-mesh/core/2-6/default/tests/chai-http.js b/gloo-mesh/core/2-6/default/tests/chai-http.js index 67f43db003..92bf579690 100644 --- a/gloo-mesh/core/2-6/default/tests/chai-http.js +++ b/gloo-mesh/core/2-6/default/tests/chai-http.js @@ -25,7 +25,30 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); + }); + }, + + checkURLWithIP: ({ ip, host, protocol = "http", path = "", headers = [], certFile = '', keyFile = '', retCode }) => { + debugLog(`Checking URL with IP: ${ip}, Host: ${host}, Path: ${path} with expected return code: ${retCode}`); + + let cert = certFile ? fs.readFileSync(certFile) : ''; + let key = keyFile ? 
fs.readFileSync(keyFile) : ''; + + let url = `${protocol}://${ip}`; + + // Use chai-http to make a request to the IP address, but set the Host header + let request = chai.request(url).head(path).redirects(0).cert(cert).key(key).set('Host', host); + + debugLog(`Setting headers: ${JSON.stringify(headers)}`); + headers.forEach(header => request.set(header.key, header.value)); + + return request + .send() + .then(async function (res) { + debugLog(`Response status code: ${res.status}`); + debugLog(`Response ${JSON.stringify(res)}`); + expect(res).to.have.property('status', retCode); }); }, @@ -124,7 +147,7 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); }); } }; diff --git a/gloo-mesh/core/2-6/default/tests/proxies-changes.test.js.liquid b/gloo-mesh/core/2-6/default/tests/proxies-changes.test.js.liquid new file mode 100644 index 0000000000..1934ea13b6 --- /dev/null +++ b/gloo-mesh/core/2-6/default/tests/proxies-changes.test.js.liquid @@ -0,0 +1,58 @@ +{%- assign version_1_18_or_after = "1.18.0" | minimumGlooGatewayVersion %} +const { execSync } = require('child_process'); +const { expect } = require('chai'); +const { diff } = require('jest-diff'); + +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +describe('Gloo snapshot stability test', function() { + let contextName = process.env.{{ context | default: "CLUSTER1" }}; + let delaySeconds = {{ delay | default: 5 }}; + + let firstSnapshot; + + it('should retrieve initial snapshot', function() { + const output = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + + try { + firstSnapshot = JSON.parse(output); + } catch (err) { + throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message); + } + expect(firstSnapshot).to.be.an('object'); + }); + + it('should not change after the given delay', async function() { + await delay(delaySeconds * 1000); + + let secondSnapshot; + try { + const output2 = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + secondSnapshot = JSON.parse(output2); + } catch (err) { + throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message); + } + + const firstJson = JSON.stringify(firstSnapshot, null, 2); + const secondJson = JSON.stringify(secondSnapshot, null, 2); + + // Show only 2 lines of context around each change + const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false }); + + if (! diffOutput.includes("Compared values have no visual difference.")) { + console.error('Differences found between snapshots:\n' + diffOutput); + throw new Error('Snapshots differ after the delay.'); + } else { + console.log('No differences found. 
The snapshots are stable.'); + } + }); +}); + From 2d3d484576e1f6fc0475e558bb97e131489775f8 Mon Sep 17 00:00:00 2001 From: soloio-bot <> Date: Fri, 3 Jan 2025 17:23:39 +0000 Subject: [PATCH 04/34] Update from https://github.com/solo-io/procgen/commit/b0fcf769a8ed8b42653f7aeb190716fb7add3277 --- .../2-7/ambient-interoperability/README.md | 63 +- .../deploy-kind-clusters/deploy-cluster1.sh | 235 +++ .../2-7/ambient-interoperability/package.json | 44 + .../core/2-7/ambient-interoperability/run.sh | 1349 +++++++++++++++++ .../scripts/configure-domain-rewrite.sh | 2 +- .../scripts/register-domain.sh | 11 +- .../tests/chai-exec.js | 7 +- .../tests/chai-http.js | 27 +- .../tests/proxies-changes.test.js.liquid | 58 + 9 files changed, 1748 insertions(+), 48 deletions(-) create mode 100644 gloo-mesh/core/2-7/ambient-interoperability/data/steps/deploy-kind-clusters/deploy-cluster1.sh create mode 100644 gloo-mesh/core/2-7/ambient-interoperability/package.json create mode 100644 gloo-mesh/core/2-7/ambient-interoperability/run.sh create mode 100644 gloo-mesh/core/2-7/ambient-interoperability/tests/proxies-changes.test.js.liquid diff --git a/gloo-mesh/core/2-7/ambient-interoperability/README.md b/gloo-mesh/core/2-7/ambient-interoperability/README.md index 88b4c82af9..6db56da1c3 100644 --- a/gloo-mesh/core/2-7/ambient-interoperability/README.md +++ b/gloo-mesh/core/2-7/ambient-interoperability/README.md @@ -15,7 +15,7 @@ source ./scripts/assert.sh ## Table of Contents * [Introduction](#introduction) -* [Lab 1 - Deploy a KinD cluster](#lab-1---deploy-a-kind-cluster-) +* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-) * [Lab 2 - Deploy and register Gloo Mesh](#lab-2---deploy-and-register-gloo-mesh-) * [Lab 3 - Deploy Istio using Helm](#lab-3---deploy-istio-using-helm-) * [Lab 4 - Deploy the Bookinfo demo app](#lab-4---deploy-the-bookinfo-demo-app-) @@ -72,7 +72,7 @@ You can find more information about Gloo Mesh Core in the official documentation -## Lab 1 - Deploy a KinD cluster +## Lab 1 - Deploy KinD Cluster(s) Clone this repository and go to the directory where this `README.md` file is. @@ -84,12 +84,11 @@ export MGMT=cluster1 export CLUSTER1=cluster1 ``` -Run the following commands to deploy a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io/): +Deploy the KinD clusters: ```bash -./scripts/deploy-multi-with-calico.sh 1 cluster1 us-west us-west-1 +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh ``` - Then run the following commands to wait for all the Pods to be ready: ```bash @@ -98,38 +97,20 @@ Then run the following commands to wait for all the Pods to be ready: **Note:** If you run the `check.sh` script immediately after the `deploy.sh` script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again. 
-Once the `check.sh` script completes, when you execute the `kubectl get pods -A` command, you should see the following: - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system calico-kube-controllers-59d85c5c84-sbk4k 1/1 Running 0 4h26m -kube-system calico-node-przxs 1/1 Running 0 4h26m -kube-system coredns-6955765f44-ln8f5 1/1 Running 0 4h26m -kube-system coredns-6955765f44-s7xxx 1/1 Running 0 4h26m -kube-system etcd-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-apiserver-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-controller-manager-cluster1-control-plane1/1 Running 0 4h27m -kube-system kube-proxy-ksvzw 1/1 Running 0 4h26m -kube-system kube-scheduler-cluster1-control-plane 1/1 Running 0 4h27m -local-path-storage local-path-provisioner-58f6947c7-lfmdx 1/1 Running 0 4h26m -metallb-system controller-5c9894b5cd-cn9x2 1/1 Running 0 4h26m -metallb-system speaker-d7jkp 1/1 Running 0 4h26m -``` - -**Note:** The CNI pods might be different, depending on which CNI you have deployed. - +Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state. @@ -175,6 +156,7 @@ EOF echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } --> + Run the following commands to deploy the Gloo Mesh management plane: ```bash @@ -236,6 +218,10 @@ EOF kubectl --context ${MGMT} -n gloo-mesh rollout status deploy/gloo-mesh-mgmt-server ``` +Set the endpoint for the Gloo Mesh UI: +```bash +export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090 +``` + Run the following commands to deploy the Gloo Mesh management plane: ```bash @@ -502,7 +487,8 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || -## Lab 3 - Deploy Istio using Helm + +## Lab 3 - Deploy Istio v1.24.1-patch1-distroless It is convenient to have the `istioctl` command line tool installed on your local machine. If you don't have it installed, you can install it by following the instructions below. @@ -544,7 +530,7 @@ describe("istio_version is at least 1.23.0", () => { it("version should be at least 1.23.0", () => { // Compare the string istio_version to the number 1.23.0 // example 1.23.0-patch0 is valid, but 1.22.6 is not - let version = "1.23.1"; + let version = "1.24.1-patch1-distroless"; let versionParts = version.split('-')[0].split('.'); let major = parseInt(versionParts[0]); let minor = parseInt(versionParts[1]); @@ -593,6 +579,7 @@ spec: selector: app: istio-ingressgateway istio: ingressgateway + revision: 1-23 type: LoadBalancer EOF @@ -650,6 +637,7 @@ spec: selector: app: istio-ingressgateway istio: eastwestgateway + revision: 1-23 type: LoadBalancer EOF kubectl --context ${CLUSTER2} create ns istio-gateways @@ -676,6 +664,7 @@ spec: selector: app: istio-ingressgateway istio: ingressgateway + revision: 1-23 type: LoadBalancer EOF @@ -733,6 +722,7 @@ spec: selector: app: istio-ingressgateway istio: eastwestgateway + revision: 1-23 type: LoadBalancer EOF ``` @@ -744,27 +734,29 @@ Let's deploy Istio using Helm in cluster1. 
We'll install the base Istio componen helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/base \ --namespace istio-system \ --kube-context=${CLUSTER1} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - </istiod \ --namespace istio-system \ --kube-context=${CLUSTER1} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < proxy: clusterDomain: cluster.local - tag: 1.23.1-solo + tag: 1.24.1-patch1-solo-distroless multiCluster: clusterName: cluster1 profile: ambient +revision: 1-23 istio_cni: enabled: true meshConfig: @@ -785,13 +777,14 @@ EOF helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ --namespace kube-system \ --kube-context=${CLUSTER1} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < - proxy: 1.23.1-solo + proxy: 1.24.1-patch1-solo-distroless profile: ambient +revision: 1-23 cni: ambient: dnsCapture: true @@ -803,11 +796,12 @@ EOF helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ --namespace istio-system \ --kube-context=${CLUSTER1} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < @@ -818,7 +812,7 @@ namespace: istio-system profile: ambient proxy: clusterDomain: cluster.local -tag: 1.23.1-solo +tag: 1.24.1-patch1-solo-distroless terminationGracePeriodSeconds: 29 variant: distroless EOF @@ -826,16 +820,18 @@ EOF helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER1} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - </gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER1} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - </base \ --namespace istio-system \ --kube-context=${CLUSTER2} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - </istiod \ --namespace istio-system \ --kube-context=${CLUSTER2} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < proxy: clusterDomain: cluster.local - tag: 1.23.1-solo + tag: 1.24.1-patch1-solo-distroless multiCluster: clusterName: cluster2 profile: ambient +revision: 1-23 istio_cni: enabled: true meshConfig: @@ -917,13 +917,14 @@ EOF helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ --namespace kube-system \ --kube-context=${CLUSTER2} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < - proxy: 1.23.1-solo + proxy: 1.24.1-patch1-solo-distroless profile: ambient +revision: 1-23 cni: ambient: dnsCapture: true @@ -935,11 +936,12 @@ EOF helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ --namespace istio-system \ --kube-context=${CLUSTER2} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < @@ -950,7 +952,7 @@ namespace: istio-system profile: ambient proxy: clusterDomain: cluster.local -tag: 1.23.1-solo +tag: 1.24.1-patch1-solo-distroless terminationGracePeriodSeconds: 29 variant: distroless EOF @@ -958,16 +960,18 @@ EOF helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER2} \ ---version 1.23.1-solo \ +--version 
1.24.1-patch1-solo-distroless \ --create-namespace \ -f - </gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER2} \ ---version 1.23.1-solo \ +--version 1.24.1-patch1-solo-distroless \ --create-namespace \ -f - < [VIDEO LINK](https://youtu.be/nzYcrjalY5A "Video Link") @@ -1128,6 +1132,8 @@ kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio.io/datapl kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio.io/dataplane-mode=ambient kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio-injection=disabled kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio-injection=disabled +kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio.io/rev=1-23 --overwrite +kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio.io/rev=1-23 --overwrite # Deploy the frontend bookinfo service in the bookinfo-frontends namespace kubectl --context ${CLUSTER1} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml @@ -1174,6 +1180,8 @@ kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio.io/datapl kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio.io/dataplane-mode=ambient kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio-injection=disabled kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio-injection=disabled +kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio.io/rev=1-23 --overwrite +kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio.io/rev=1-23 --overwrite # Deploy the frontend bookinfo service in the bookinfo-frontends namespace kubectl --context ${CLUSTER2} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml @@ -1254,6 +1262,7 @@ Run the following commands to deploy the httpbin app on `cluster1`. 
The deployme ```bash kubectl --context ${CLUSTER1} create ns httpbin kubectl --context ${CLUSTER1} label namespace httpbin istio.io/dataplane-mode=ambient +kubectl --context ${CLUSTER1} label namespace httpbin istio.io/rev=1-23 kubectl apply --context ${CLUSTER1} -f - < +## Lab 13 - Upgrade Istio to v1.23.0-patch1 @@ -3031,9 +3040,10 @@ helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-< -f - </istiod \ +helm upgrade --install istiod-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/istiod \ --namespace istio-system \ --kube-context=${CLUSTER1} \ --version 1.23.0-patch1-solo \ @@ -3047,6 +3057,7 @@ global: multiCluster: clusterName: cluster1 profile: ambient +revision: 1-23-0-patch1 istio_cni: enabled: true meshConfig: @@ -3074,6 +3085,7 @@ global: hub: us-docker.pkg.dev/gloo-mesh/istio- proxy: 1.23.0-patch1-solo profile: ambient +revision: 1-23-0-patch1 cni: ambient: dnsCapture: true @@ -3090,6 +3102,7 @@ helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm- @@ -3105,7 +3118,7 @@ terminationGracePeriodSeconds: 29 variant: distroless EOF -helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +helm upgrade --install istio-ingressgateway-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER1} \ --version 1.23.0-patch1-solo \ @@ -3114,15 +3127,17 @@ helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-me autoscaling: enabled: false profile: ambient +revision: 1-23-0-patch1 imagePullPolicy: IfNotPresent labels: app: istio-ingressgateway istio: ingressgateway + revision: 1-23-0-patch1 service: type: None EOF -helm upgrade --install istio-eastwestgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +helm upgrade --install istio-eastwestgateway-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER1} \ --version 1.23.0-patch1-solo \ @@ -3131,6 +3146,7 @@ helm upgrade --install istio-eastwestgateway-1-23 oci://us-docker.pkg.dev/gloo-m autoscaling: enabled: false profile: ambient +revision: 1-23-0-patch1 imagePullPolicy: IfNotPresent env: ISTIO_META_REQUESTED_NETWORK_VIEW: cluster1 @@ -3138,6 +3154,7 @@ env: labels: app: istio-ingressgateway istio: eastwestgateway + revision: 1-23-0-patch1 topology.istio.io/network: cluster1 service: type: None @@ -3156,9 +3173,10 @@ helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-< -f - </istiod \ +helm upgrade --install istiod-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/istiod \ --namespace istio-system \ --kube-context=${CLUSTER2} \ --version 1.23.0-patch1-solo \ @@ -3172,6 +3190,7 @@ global: multiCluster: clusterName: cluster2 profile: ambient +revision: 1-23-0-patch1 istio_cni: enabled: true meshConfig: @@ -3199,6 +3218,7 @@ global: hub: us-docker.pkg.dev/gloo-mesh/istio- proxy: 1.23.0-patch1-solo profile: ambient +revision: 1-23-0-patch1 cni: ambient: dnsCapture: true @@ -3215,6 +3235,7 @@ helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm- @@ -3230,7 +3251,7 @@ terminationGracePeriodSeconds: 29 variant: distroless EOF -helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +helm upgrade --install istio-ingressgateway-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER2} \ 
--version 1.23.0-patch1-solo \ @@ -3239,15 +3260,17 @@ helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-me autoscaling: enabled: false profile: ambient +revision: 1-23-0-patch1 imagePullPolicy: IfNotPresent labels: app: istio-ingressgateway istio: ingressgateway + revision: 1-23-0-patch1 service: type: None EOF -helm upgrade --install istio-eastwestgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +helm upgrade --install istio-eastwestgateway-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ --namespace istio-gateways \ --kube-context=${CLUSTER2} \ --version 1.23.0-patch1-solo \ @@ -3256,6 +3279,7 @@ helm upgrade --install istio-eastwestgateway-1-23 oci://us-docker.pkg.dev/gloo-m autoscaling: enabled: false profile: ambient +revision: 1-23-0-patch1 imagePullPolicy: IfNotPresent env: ISTIO_META_REQUESTED_NETWORK_VIEW: cluster2 @@ -3263,6 +3287,7 @@ env: labels: app: istio-ingressgateway istio: eastwestgateway + revision: 1-23-0-patch1 topology.istio.io/network: cluster2 service: type: None @@ -3289,10 +3314,10 @@ afterEach(function (done) { }); describe("Checking Istio installation", function() { - it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 1 })); - it('gateway pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); - it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: "app=istiod", instances: 1 })); - it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 2 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 4 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: "app=istiod", instances: 2 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 4 })); it("Gateways have an ip attached in cluster " + process.env.CLUSTER1, () => { let cli = chaiExec("kubectl --context " + process.env.CLUSTER1 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); cli.stderr.should.be.empty; @@ -3367,6 +3392,113 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || + +## Lab 14 - Migrate workloads to a new Istio revision + +Now, let's label all namespaces to use the new revision and rollout all deployments so that their proxies connect to the new revision: + +```bash +kubectl --context ${CLUSTER1} get ns -l istio.io/rev=1-23 -o json | jq -r 
'.items[].metadata.name' | while read ns; do
+  kubectl --context ${CLUSTER1} label ns ${ns} istio.io/rev=1-23-0-patch1 --overwrite
+done
+kubectl --context ${CLUSTER2} get ns -l istio.io/rev=1-23 -o json | jq -r '.items[].metadata.name' | while read ns; do
+  kubectl --context ${CLUSTER2} label ns ${ns} istio.io/rev=1-23-0-patch1 --overwrite
+done
+kubectl --context ${CLUSTER1} -n httpbin patch deploy in-mesh --patch "{\"spec\": {\"template\": {\"metadata\": {\"labels\": {\"istio.io/rev\": \"1-23-0-patch1\" }}}}}"
+kubectl --context ${CLUSTER1} -n clients patch deploy in-mesh-with-sidecar --patch "{\"spec\": {\"template\": {\"metadata\": {\"labels\": {\"istio.io/rev\": \"1-23-0-patch1\" }}}}}"
+```
+
+
+Test that you can still access the `productpage` service through the Istio Ingress Gateway corresponding to the old revision using the command below:
+
+```bash
+curl -k "https:///productpage" -I
+```
+
+You should get a response similar to the following one:
+
+```
+HTTP/2 200
+server: istio-envoy
+date: Wed, 24 Aug 2022 14:58:22 GMT
+content-type: application/json
+content-length: 670
+access-control-allow-origin: *
+access-control-allow-credentials: true
+x-envoy-upstream-service-time: 7
+```
+
+
+
+All good, so we can now configure the Istio gateway service(s) to use both revisions:
+
+```bash
+kubectl --context ${CLUSTER1} -n istio-gateways patch svc istio-ingressgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]'
+kubectl --context ${CLUSTER1} -n istio-gateways patch svc istio-eastwestgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]'
+kubectl --context ${CLUSTER2} -n istio-gateways patch svc istio-ingressgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]'
+kubectl --context ${CLUSTER2} -n istio-gateways patch svc istio-eastwestgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]'
+```
+
+We don't switch the selector directly from the old revision to the new one, so that no request is dropped during the migration.
+
+Test that you can still access the `productpage` service:
+
+```bash
+curl -k "https:///productpage" -I
+```
+
+You should get a response similar to the following one:
+
+```
+HTTP/2 200
+server: istio-envoy
+date: Wed, 24 Aug 2022 14:58:22 GMT
+content-type: application/json
+content-length: 670
+access-control-allow-origin: *
+access-control-allow-credentials: true
+```
+
+
+
+
+
Waypoints are upgraded automatically
The waypoints are upgraded by Istiod's Gateway Controller, so if you check the status you will see that it is on the newest "1.23.0-patch1" version:
@@ -3403,46 +3535,150 @@ describe("istio in place upgrades", function() {
 });
 });
 EOF
-echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-istio-helm/tests/waypoint-upgraded.test.js.liquid"
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/helm-migrate-workloads-to-revision/tests/waypoint-upgraded.test.js.liquid"
 timeout --signal=INT 1m mocha ./test.js --timeout 10000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
 -->
-Test that you can still access the `productpage` service through the Istio Ingress Gateway corresponding to the old revision using the command below:
-```shell
-curl -k "https:///productpage" -I
+
+
+## Lab 15 - Helm Cleanup Istio Revision
+
+Everything is working well with the new version, so we can uninstall the previous version.
+
+Let's start with the gateways:
+
+```bash
+helm uninstall istio-ingressgateway-1-23 \
+--namespace istio-gateways \
+--kube-context=${CLUSTER1}
+
+helm uninstall istio-eastwestgateway-1-23 \
+--namespace istio-gateways \
+--kube-context=${CLUSTER1}
+
+helm uninstall istio-ingressgateway-1-23 \
+--namespace istio-gateways \
+--kube-context=${CLUSTER2}
+
+helm uninstall istio-eastwestgateway-1-23 \
+--namespace istio-gateways \
+--kube-context=${CLUSTER2}
 ```
+
+
-```http,nocopy
-HTTP/2 200
-server: istio-envoy
-date: Wed, 24 Aug 2022 14:58:22 GMT
-content-type: application/json
-content-length: 670
-access-control-allow-origin: *
-access-control-allow-credentials: true
-x-envoy-upstream-service-time: 7
+And then the control plane:
+
+```bash
+helm uninstall istiod-1-23 \
+--namespace istio-system \
+--kube-context=${CLUSTER1}
+
+helm uninstall istiod-1-23 \
+--namespace istio-system \
+--kube-context=${CLUSTER2}
 ```
+
+Run the following command:
+
+```bash
+kubectl --context ${CLUSTER1} -n istio-system get pods && kubectl --context ${CLUSTER1} -n istio-gateways get pods
+```
+
+You should get the following output:
+
+```
+NAME                                                    READY   STATUS    RESTARTS   AGE
+istiod-1-23-0-patch1-796fffbdf5-n6xc9                   1/1     Running   0          25m
+NAME                                                    READY   STATUS    RESTARTS   AGE
+istio-eastwestgateway-1-23-0-patch1-546446c77b-zg5hd    1/1     Running   0          25m
+istio-ingressgateway-1-23-0-patch1-784f69b4bb-lcfk9     1/1     Running   0          25m
+```
+
+This confirms that only the new version is running.

-
-## Lab 14 - Ambient Egress Traffic with Waypoint
+## Lab 16 - Ambient Egress Traffic with Waypoint

 In this lab, we'll explore how to control and secure outbound traffic from your Ambient Mesh using Waypoints. We'll start by restricting all outgoing traffic from a specific namespace, then set up a shared Waypoint to manage egress traffic centrally. This approach allows for consistent policy enforcement across multiple services and namespaces.
@@ -3735,7 +3971,7 @@ kubectl --context ${CLUSTER1} delete authorizationpolicy httpbin -n egress
-## Lab 15 - Waypoint Deployment Options
+## Lab 17 - Waypoint Deployment Options

 This lab explores different ways to deploy Waypoints in Istio's Ambient Mesh. We'll learn about deploying Waypoints for services and for workloads.
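+
+For orientation, here is a minimal sketch of what a waypoint declaration looks like in ambient mode. It assumes the Kubernetes Gateway API CRDs and the `istio-waypoint` GatewayClass installed earlier are available; the namespace and waypoint name are illustrative only:
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <<EOF
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: waypoint
+  namespace: bookinfo-backends
+  labels:
+    # "service" attaches the waypoint to traffic addressed to Services;
+    # "workload" (or "all") widens it to traffic addressed to individual pods
+    istio.io/waypoint-for: service
+spec:
+  gatewayClassName: istio-waypoint
+  listeners:
+  - name: mesh
+    port: 15008
+    protocol: HBONE
+EOF
+
+# Namespaces (or individual Services) opt in to the waypoint via a label:
+kubectl --context ${CLUSTER1} label ns bookinfo-backends istio.io/use-waypoint=waypoint
+```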
diff --git a/gloo-mesh/core/2-7/ambient/data/steps/deploy-kind-clusters/deploy-cluster1.sh b/gloo-mesh/core/2-7/ambient/data/steps/deploy-kind-clusters/deploy-cluster1.sh new file mode 100644 index 0000000000..3fda068282 --- /dev/null +++ b/gloo-mesh/core/2-7/ambient/data/steps/deploy-kind-clusters/deploy-cluster1.sh @@ -0,0 +1,292 @@ +#!/usr/bin/env bash +set -o errexit + +number="2" +name="cluster1" +region="" +zone="" +twodigits=$(printf "%02d\n" $number) + +kindest_node=${KINDEST_NODE} + +if [ -z "$kindest_node" ]; then + export k8s_version="1.28.0" + + [[ ${k8s_version::1} != 'v' ]] && export k8s_version=v${k8s_version} + kindest_node_ver=$(curl --silent "https://registry.hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" \ + | jq -r '.results | .[] | select(.name==env.k8s_version) | .name+"@"+.digest') + + if [ -z "$kindest_node_ver" ]; then + echo "Incorrect Kubernetes version provided: ${k8s_version}." + exit 1 + fi + kindest_node=kindest/node:${kindest_node_ver} +fi +echo "Using KinD image: ${kindest_node}" + +if [ -z "$3" ]; then + case $name in + cluster1) + region=us-west-1 + ;; + cluster2) + region=us-west-2 + ;; + *) + region=us-east-1 + ;; + esac +fi + +if [ -z "$4" ]; then + case $name in + cluster1) + zone=us-west-1a + ;; + cluster2) + zone=us-west-2a + ;; + *) + zone=us-east-1a + ;; + esac +fi + +if hostname -I 2>/dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY 
+FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null || true +source ./scripts/assert.sh +export MGMT=mgmt +export CLUSTER1=cluster1 +export CLUSTER2=cluster2 +bash ./data/steps/deploy-kind-clusters/deploy-mgmt.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh +bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh +./scripts/check.sh mgmt +./scripts/check.sh cluster1 +./scripts/check.sh cluster2 +kubectl config use-context ${MGMT} +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Clusters are healthy", () => { + const clusters = ["mgmt", "cluster1", "cluster2"]; + + clusters.forEach(cluster => { + it(`Cluster ${cluster} is healthy`, () => helpers.k8sObjectIsPresent({ context: cluster, namespace: "default", k8sType: "service", k8sObj: "kubernetes" })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-kind-clusters/tests/cluster-healthy.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export GLOO_MESH_VERSION=v2.7.0-beta1 +curl -sL https://run.solo.io/meshctl/install | sh - +export PATH=$HOME/.gloo-mesh/bin:$PATH 
+cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; + +describe("Required environment variables should contain value", () => { + afterEach(function(done){ + if(this.currentTest.currentRetry() > 0){ + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } + }); + + it("Context environment variables should not be empty", () => { + expect(process.env.MGMT).not.to.be.empty + expect(process.env.CLUSTER1).not.to.be.empty + expect(process.env.CLUSTER2).not.to.be.empty + }); + + it("Gloo Mesh licence environment variables should not be empty", () => { + expect(process.env.GLOO_MESH_LICENSE_KEY).not.to.be.empty + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${MGMT} create ns gloo-mesh + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --set featureGates.insightsConfiguration=true \ + --version 2.7.0-beta1 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${MGMT} \ + --version 2.7.0-beta1 \ + -f -< ./test.js + +const helpers = require('./tests/chai-exec'); + +describe("MGMT server is healthy", () => { + let cluster = process.env.MGMT; + let deployments = ["gloo-mesh-mgmt-server","gloo-mesh-redis","gloo-telemetry-gateway","prometheus-server"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "gloo-mesh", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/check-deployment.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/get-gloo-mesh-mgmt-server-ip.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +export ENDPOINT_GLOO_MESH=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-mgmt-server -o jsonpath='{.status.loadBalancer.ingress[0].*}'):9900 +export HOST_GLOO_MESH=$(echo ${ENDPOINT_GLOO_MESH%:*}) +export ENDPOINT_TELEMETRY_GATEWAY=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-telemetry-gateway -o jsonpath='{.status.loadBalancer.ingress[0].*}'):4317 +export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090 +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = 
require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GLOO_MESH + "' can be resolved in DNS", () => { + it(process.env.HOST_GLOO_MESH + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GLOO_MESH, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./gloo-mesh-2-0/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${MGMT} -f - < ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER1} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER1} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.7.0-beta1 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.7.0-beta1 \ + -f -< ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER2} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER2} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.7.0-beta1 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.7.0-beta1 \ + -f -< ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); +describe("Cluster registration", () => { + it("cluster1 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster1"); + }); + it("cluster2 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster2"); + }); +}); +EOF +echo "executing test 
dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/cluster-registration.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +curl -L https://istio.io/downloadIstio | sh - + +if [ -d "istio-"*/ ]; then + cd istio-*/ + export PATH=$PWD/bin:$PATH + cd .. +fi +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +var chai = require('chai'); +var expect = chai.expect; +chai.use(chaiExec); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); +describe("istio_version is at least 1.23.0", () => { + it("version should be at least 1.23.0", () => { + // Compare the string istio_version to the number 1.23.0 + // example 1.23.0-patch0 is valid, but 1.22.6 is not + let version = "1.24.1-patch1-distroless"; + let versionParts = version.split('-')[0].split('.'); + let major = parseInt(versionParts[0]); + let minor = parseInt(versionParts[1]); + let patch = parseInt(versionParts[2]); + let minMajor = 1; + let minMinor = 23; + let minPatch = 0; + expect(major).to.be.at.least(minMajor); + if (major === minMajor) { + expect(minor).to.be.at.least(minMinor); + if (minor === minMinor) { + expect(patch).to.be.at.least(minPatch); + } + } + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-istio-helm/tests/istio-version.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns istio-gateways + +kubectl apply --context ${CLUSTER1} -f - </base \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - </istiod \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < + proxy: + clusterDomain: cluster.local + tag: 1.24.1-patch1-solo-distroless + multiCluster: + clusterName: cluster1 +profile: ambient +revision: 1-23 +istio_cni: + enabled: true +meshConfig: + accessLogFile: /dev/stdout + defaultConfig: + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + trustDomain: cluster1 +pilot: + enabled: true + env: + PILOT_ENABLE_IP_AUTOALLOCATE: "true" + PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES: "false" + PILOT_SKIP_VALIDATE_TRUST_DOMAIN: "true" +EOF + +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ +--namespace kube-system \ +--kube-context=${CLUSTER1} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < + proxy: 1.24.1-patch1-solo-distroless +profile: ambient +revision: 1-23 +cni: + ambient: + dnsCapture: true + excludeNamespaces: + - istio-system + - kube-system +EOF + +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < +istioNamespace: istio-system +multiCluster: + clusterName: cluster1 +namespace: istio-system +profile: ambient +proxy: + clusterDomain: cluster.local +tag: 1.24.1-patch1-solo-distroless +terminationGracePeriodSeconds: 29 +variant: distroless +EOF + +helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ 
+--namespace istio-gateways \ +--kube-context=${CLUSTER1} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - </gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER1} apply -f -; } +kubectl --context ${CLUSTER2} get crd gateways.gateway.networking.k8s.io &> /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER2} apply -f -; } +helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/base \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - </istiod \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < + proxy: + clusterDomain: cluster.local + tag: 1.24.1-patch1-solo-distroless + multiCluster: + clusterName: cluster2 +profile: ambient +revision: 1-23 +istio_cni: + enabled: true +meshConfig: + accessLogFile: /dev/stdout + defaultConfig: + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + trustDomain: cluster2 +pilot: + enabled: true + env: + PILOT_ENABLE_IP_AUTOALLOCATE: "true" + PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES: "false" + PILOT_SKIP_VALIDATE_TRUST_DOMAIN: "true" +EOF + +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ +--namespace kube-system \ +--kube-context=${CLUSTER2} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < + proxy: 1.24.1-patch1-solo-distroless +profile: ambient +revision: 1-23 +cni: + ambient: + dnsCapture: true + excludeNamespaces: + - istio-system + - kube-system +EOF + +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < +istioNamespace: istio-system +multiCluster: + clusterName: cluster2 +namespace: istio-system +profile: ambient +proxy: + clusterDomain: cluster.local +tag: 1.24.1-patch1-solo-distroless +terminationGracePeriodSeconds: 29 +variant: distroless +EOF + +helm upgrade --install istio-ingressgateway-1-23 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - </gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} \ +--version 1.24.1-patch1-solo-distroless \ +--create-namespace \ +-f - < /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER1} apply -f -; } +kubectl --context ${CLUSTER2} get crd gateways.gateway.networking.k8s.io &> /dev/null || \ + { kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.1.0" | kubectl --context ${CLUSTER2} apply -f -; } +cat <<'EOF' > ./test.js + +const helpers = require('./tests/chai-exec'); + +const chaiExec = require("@jsdevtools/chai-exec"); +const helpersHttp = require('./tests/chai-http'); +const chai = require("chai"); +const expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("Checking 
Istio installation", function() { + it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: "app=istiod", instances: 1 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 2 })); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER1, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER1 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER2, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER2 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-istio-helm/tests/istio-ready.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +timeout 2m bash -c "until [[ \$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o json | jq '.items[0].status.loadBalancer | length') -gt 0 ]]; do + sleep 1 +done" +export HOST_GW_CLUSTER1="$(kubectl --context ${CLUSTER1} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +export HOST_GW_CLUSTER2="$(kubectl --context ${CLUSTER2} -n istio-gateways get svc -l istio=ingressgateway -o jsonpath='{.items[0].status.loadBalancer.ingress[0].*}')" +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER1 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER1 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER1, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./default/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const dns = require('dns'); 
+const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER2 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER2 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER2, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./default/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns bookinfo-frontends +kubectl --context ${CLUSTER1} create ns bookinfo-backends +kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio.io/dataplane-mode=ambient +kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio.io/dataplane-mode=ambient +kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio-injection=disabled +kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio-injection=disabled +kubectl --context ${CLUSTER1} label namespace bookinfo-frontends istio.io/rev=1-23 --overwrite +kubectl --context ${CLUSTER1} label namespace bookinfo-backends istio.io/rev=1-23 --overwrite + +# Deploy the frontend bookinfo service in the bookinfo-frontends namespace +kubectl --context ${CLUSTER1} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml + +# Deploy the backend bookinfo services in the bookinfo-backends namespace for all versions less than v3 +kubectl --context ${CLUSTER1} -n bookinfo-backends apply \ + -f data/steps/deploy-bookinfo/details-v1.yaml \ + -f data/steps/deploy-bookinfo/ratings-v1.yaml \ + -f data/steps/deploy-bookinfo/reviews-v1-v2.yaml + +# Update the reviews service to display where it is coming from +kubectl --context ${CLUSTER1} -n bookinfo-backends set env deploy/reviews-v1 CLUSTER_NAME=${CLUSTER1} +kubectl --context ${CLUSTER1} -n bookinfo-backends set env deploy/reviews-v2 CLUSTER_NAME=${CLUSTER1} +echo -n Waiting for bookinfo pods to be ready... +timeout -v 5m bash -c " +until [[ \$(kubectl --context ${CLUSTER1} -n bookinfo-frontends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 1 && \\ + \$(kubectl --context ${CLUSTER1} -n bookinfo-backends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 4 ]] 2>/dev/null +do + sleep 1 + echo -n . 
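+  # keep polling until the 1 frontend and 4 backend bookinfo deployments report ready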
+done" +echo +kubectl --context ${CLUSTER2} create ns bookinfo-frontends +kubectl --context ${CLUSTER2} create ns bookinfo-backends +kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio.io/dataplane-mode=ambient +kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio.io/dataplane-mode=ambient +kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio-injection=disabled +kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio-injection=disabled +kubectl --context ${CLUSTER2} label namespace bookinfo-frontends istio.io/rev=1-23 --overwrite +kubectl --context ${CLUSTER2} label namespace bookinfo-backends istio.io/rev=1-23 --overwrite + +# Deploy the frontend bookinfo service in the bookinfo-frontends namespace +kubectl --context ${CLUSTER2} -n bookinfo-frontends apply -f data/steps/deploy-bookinfo/productpage-v1.yaml +# Deploy the backend bookinfo services in the bookinfo-backends namespace for all versions +kubectl --context ${CLUSTER2} -n bookinfo-backends apply \ + -f data/steps/deploy-bookinfo/details-v1.yaml \ + -f data/steps/deploy-bookinfo/ratings-v1.yaml \ + -f data/steps/deploy-bookinfo/reviews-v1-v2.yaml \ + -f data/steps/deploy-bookinfo/reviews-v3.yaml +# Update the reviews service to display where it is coming from +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v1 CLUSTER_NAME=${CLUSTER2} +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v2 CLUSTER_NAME=${CLUSTER2} +kubectl --context ${CLUSTER2} -n bookinfo-backends set env deploy/reviews-v3 CLUSTER_NAME=${CLUSTER2} + +echo -n Waiting for bookinfo pods to be ready... +timeout -v 5m bash -c " +until [[ \$(kubectl --context ${CLUSTER2} -n bookinfo-frontends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 1 && \\ + \$(kubectl --context ${CLUSTER2} -n bookinfo-backends get deploy -o json | jq '[.items[].status.readyReplicas] | add') -eq 5 ]] 2>/dev/null +do + sleep 1 + echo -n . 
+done" +echo +kubectl --context ${CLUSTER2} -n bookinfo-frontends get pods && kubectl --context ${CLUSTER2} -n bookinfo-backends get pods +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Bookinfo app", () => { + let cluster = process.env.CLUSTER1 + let deployments = ["productpage-v1"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-frontends", k8sObj: deploy })); + }); + deployments = ["ratings-v1", "details-v1", "reviews-v1", "reviews-v2"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-backends", k8sObj: deploy })); + }); + cluster = process.env.CLUSTER2 + deployments = ["productpage-v1"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-frontends", k8sObj: deploy })); + }); + deployments = ["ratings-v1", "details-v1", "reviews-v1", "reviews-v2", "reviews-v3"]; + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "bookinfo-backends", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/deploy-bookinfo/tests/check-bookinfo.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns httpbin +kubectl --context ${CLUSTER1} label namespace httpbin istio.io/dataplane-mode=ambient +kubectl --context ${CLUSTER1} label namespace httpbin istio.io/rev=1-23 +kubectl apply --context ${CLUSTER1} -f - </dev/null +do + sleep 1 + echo -n . +done" +echo +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("httpbin app", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "httpbin", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/httpbin/deploy-httpbin/tests/check-httpbin.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} create ns clients + +kubectl apply --context ${CLUSTER1} -f - </dev/null +do + sleep 1 + echo -n . 
+done" +echo +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("client apps", () => { + let cluster = process.env.CLUSTER1 + + let deployments = ["not-in-mesh", "in-mesh-with-sidecar", "in-ambient"]; + + deployments.forEach(deploy => { + it(deploy + ' pods are ready in ' + cluster, () => helpers.checkDeployment({ context: cluster, namespace: "clients", k8sObj: deploy })); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/clients/deploy-clients/tests/check-clients.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is available (HTTP)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `http://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/productpage-available.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout tls.key -out tls.crt -subj "/CN=*" +kubectl --context ${CLUSTER1} -n istio-gateways create secret generic tls-secret \ +--from-file=tls.key=tls.key \ +--from-file=tls.crt=tls.crt + +kubectl --context ${CLUSTER2} -n istio-gateways create secret generic tls-secret \ +--from-file=tls.key=tls.key \ +--from-file=tls.crt=tls.crt +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is available (HTTPS)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/productpage-available-secure.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Otel metrics", () => { + it("cluster1 is sending metrics to telemetryGateway", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app.kubernetes.io/name=prometheus -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9090/api/v1/query?query=istio_requests_total" }).replaceAll("'", ""); + expect(command).to.contain("cluster\":\"cluster1"); + }); +}); + + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/otel-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=150 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-http'); +const puppeteer = require('puppeteer'); +const chai = 
require('chai'); +const expect = chai.expect; +const GraphPage = require('./tests/pages/gloo-ui/graph-page'); +const { recognizeTextFromScreenshot } = require('./tests/utils/image-ocr-processor'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("graph page", function () { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. + const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let page; + let graphPage; + + beforeEach(async function () { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + page = await browser.newPage(); + graphPage = new GraphPage(page); + await Promise.all(Array.from({ length: 20 }, () => + helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 }))); + }); + + afterEach(async function () { + await browser.close(); + }); + + it("should show ingress gateway and product page", async function () { + await graphPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/graph`); + + // Select the clusters and namespaces so that the graph shows + await graphPage.selectClusters(['cluster1', 'cluster2']); + await graphPage.selectNamespaces(['istio-gateways', 'bookinfo-backends', 'bookinfo-frontends']); + // Disabling Cilium nodes due to this issue: https://github.com/solo-io/gloo-mesh-enterprise/issues/18623 + await graphPage.toggleLayoutSettings(); + await graphPage.disableCiliumNodes(); + await graphPage.toggleLayoutSettings(); + + // Capture a screenshot of the canvas and run text recognition + await graphPage.fullscreenGraph(); + await graphPage.centerGraph(); + const screenshotPath = 'ui-test-data/canvas.png'; + await graphPage.captureCanvasScreenshot(screenshotPath); + + const recognizedTexts = await recognizeTextFromScreenshot( + screenshotPath, + ["istio-ingressgateway", "productpage-v1", "details-v1", "ratings-v1", "reviews-v1", "reviews-v2"]); + + const flattenedRecognizedText = recognizedTexts.join(",").replace(/\n/g, ''); + console.log("Flattened recognized text:", flattenedRecognizedText); + + // Validate recognized texts + expect(flattenedRecognizedText).to.include("istio-ingressgateway"); + expect(flattenedRecognizedText).to.include("productpage-v1"); + expect(flattenedRecognizedText).to.include("details-v1"); + expect(flattenedRecognizedText).to.include("ratings-v1"); + expect(flattenedRecognizedText).to.include("reviews-v1"); + expect(flattenedRecognizedText).to.include("reviews-v2"); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/gateway-expose-istio/tests/graph-shows-traffic.test.js.liquid" +timeout --signal=INT 7m mocha ./test.js --timeout 120000 --retries=3 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} apply -f - < ./test.js +const helpers = require('./tests/chai-http'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Productpage is available (HTTPS)", () => { + it('/productpage is available in cluster1', () => 
helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); + + it('should reject traffic to bookinfo-backends details', () => { + return helpers.checkBody({ + host: `https://cluster1-bookinfo.example.com`, + path: '/productpage', + retCode: 200, + body: 'Error fetching product details', + match: true + }) + }); + + it('should reject traffic to bookinfo-backends reviews', () => { + return helpers.checkBody({ + host: `https://cluster1-bookinfo.example.com`, + path: '/productpage', + retCode: 200, + body: 'Error fetching product reviews', + match: true + }) + }); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/authorization-policies/tests/bookinfo-backend-services-unavailable.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 60000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} apply -f - < ./test.js +const helpers = require('./tests/chai-http'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Productpage is available (HTTPS)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); + + it('should admit traffic to bookinfo-backends details', () => { + return helpers.checkBody({ + host: `https://cluster1-bookinfo.example.com`, + path: '/productpage', + retCode: 200, + body: 'Book Details', + match: true + }) + }); + + it('should admit traffic to bookinfo-backends reviews', () => { + return helpers.checkBody({ + host: `https://cluster1-bookinfo.example.com`, + path: '/productpage', + retCode: 200, + body: 'Book Reviews', + match: true + }) + }); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/authorization-policies/tests/bookinfo-backend-services-available.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 60000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} apply -f - < ./test.js +const helpers = require('./tests/chai-http'); + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Productpage is available (HTTPS)", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); + + it('should reject traffic to bookinfo-backends details', () => { + return helpers.checkBody({ + host: `https://cluster1-bookinfo.example.com`, + path: '/productpage', + retCode: 200, + body: 'Error fetching product details', + match: true + }) + }); + + it('should reject traffic to bookinfo-backends reviews', () => { + return helpers.checkBody({ + host: `https://cluster1-bookinfo.example.com`, + path: '/productpage', + retCode: 200, + body: 'Error fetching product reviews', + match: true + }) + }); +}) +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/authorization-policies/tests/bookinfo-backend-services-unavailable.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 60000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} apply -f - < ./test.js +const helpers = 
require('./tests/chai-http');
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 4000);
+  } else {
+    done();
+  }
+});
+
+describe("Productpage is available (HTTPS)", () => {
+  it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 }));
+
+  it('should admit traffic to bookinfo-backends details', () => {
+    return helpers.checkBody({
+      host: `https://cluster1-bookinfo.example.com`,
+      path: '/productpage',
+      retCode: 200,
+      body: 'Book Details',
+      match: true
+    })
+  });
+
+  it('should admit traffic to bookinfo-backends reviews', () => {
+    return helpers.checkBody({
+      host: `https://cluster1-bookinfo.example.com`,
+      path: '/productpage',
+      retCode: 200,
+      body: 'Book Reviews',
+      match: true
+    })
+  });
+})
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/authorization-policies/tests/bookinfo-backend-services-available.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 60000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl --context ${CLUSTER1} -n bookinfo-backends delete authorizationpolicy policy
+for i in {1..20}; do curl -k "http://cluster1-bookinfo.example.com/productpage" -I; done
+kubectl --context ${CLUSTER1} debug -n istio-system "$pod" -it --image=curlimages/curl -- curl http://localhost:15020/metrics | grep istio_request_
+cat <<'EOF' > ./test.js
+const chaiExec = require("@jsdevtools/chai-exec");
+var chai = require('chai');
+var expect = chai.expect;
+chai.use(chaiExec);
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 1000);
+  } else {
+    done();
+  }
+});
+
+describe("L4 and L7 metrics available", function() {
+  it("ztunnel contains L4 and L7 metrics", () => {
+    let node = chaiExec(`kubectl --context ${process.env.CLUSTER1} -n bookinfo-frontends get pods -l app=productpage -o jsonpath='{.items[0].spec.nodeName}'`).stdout.replaceAll("'", "");
+    let pods = JSON.parse(chaiExec(`kubectl --context ${process.env.CLUSTER1} -n istio-system get pods -l app=ztunnel -o json`).stdout).items;
+    let pod = "";
+    pods.forEach(item => {
+      if(item.spec.nodeName == node) {
+        pod = item.metadata.name;
+      }
+    });
+    let cli = chaiExec(`kubectl --context ${process.env.CLUSTER1} -n istio-system debug ${pod} -it --image=curlimages/curl -- curl http://localhost:15020/metrics`);
+    expect(cli).to.exit.with.code(0);
+    expect(cli).output.to.contain("istio_tcp_sent_bytes_total");
+    expect(cli).output.to.contain("istio_requests_total");
+    expect(cli).output.to.contain("istio_request_duration_milliseconds");
+  });
+});
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/l7-observability/tests/l4-l7-metrics-available.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl --context "${CLUSTER1}" -n istio-system logs ds/ztunnel
+cat <<'EOF' > ./test.js
+const helpersHttp = require('./tests/chai-http');
+const InsightsPage = require('./tests/pages/insights-page');
+const constants = require('./tests/pages/constants');
+const puppeteer = require('puppeteer');
+var chai = require('chai');
+var expect = chai.expect;
+const { enhanceBrowser } = require('./tests/utils/enhance-browser');
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 4000);
+  } else {
+    done();
+  }
+});
+
+describe("Insights UI", function() {
+  // UI tests often require a longer timeout.
+  // So here we force it to a minimum of 30 seconds.
+  const currentTimeout = this.timeout();
+  this.timeout(Math.max(currentTimeout, 30000));
+
+  let browser;
+  let insightsPage;
+
+  // Use Mocha's 'before' hook to set up Puppeteer
+  beforeEach(async function() {
+    browser = await puppeteer.launch({
+      headless: "new",
+      slowMo: 40,
+      ignoreHTTPSErrors: true,
+      args: ['--no-sandbox', '--disable-setuid-sandbox'],
+    });
+    browser = enhanceBrowser(browser, this.currentTest.title);
+    let page = await browser.newPage();
+    insightsPage = new InsightsPage(page);
+  });
+
+  // Use Mocha's 'after' hook to close Puppeteer
+  afterEach(async function() {
+    await browser.close();
+  });
+
+  it("should display BP0001 warning with text 'Globally scoped routing'", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    await insightsPage.selectClusters(['cluster1', 'cluster2']);
+    await insightsPage.selectInsightTypes([constants.InsightType.BP]);
+    const data = await insightsPage.getTableDataRows()
+    expect(data.some(item => item.includes("Globally scoped routing"))).to.be.true;
+  });
+
+  it("should have quick resource state filters", async () => {
+    await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`);
+    const healthy = await insightsPage.getHealthyResourcesCount();
+    const warning = await insightsPage.getWarningResourcesCount();
+    const error = await insightsPage.getErrorResourcesCount();
+    expect(healthy).to.be.greaterThan(0);
+    expect(warning).to.be.greaterThan(0);
+    expect(error).to.be.a('number');
+  });
+});
+
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-ui-BP0001.test.js.liquid"
+timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+cat <<'EOF' > ./test.js
+var chai = require('chai');
+var expect = chai.expect;
+const helpers = require('./tests/chai-exec');
+
+describe("Insight generation", () => {
+  it("Insight BP0002 has been triggered in the source (MGMT)", () => {
+    helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` });
+    helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx:1.25.3 --context " + process.env.MGMT });
+    command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", "");
+    const regex = /gloo_mesh_insights{.*BP0002.*} 1/;
+    const match = command.match(regex);
+    expect(match).to.not.be.null;
+  });
+
+  it("Insight BP0002 has been triggered in PROMETHEUS", () => {
+    helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` });
+    command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", "");
+    let result = JSON.parse(command);
+    let active = false;
+    result.data.result.forEach(item => {
+      
if(item.metric.code == "BP0002" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${MGMT} -f - < ./test.js +const helpersHttp = require('./tests/chai-http'); +const InsightsPage = require('./tests/pages/insights-page'); +const constants = require('./tests/pages/constants'); +const puppeteer = require('puppeteer'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); +var chai = require('chai'); +var expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Insights UI", function() { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. + const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let insightsPage; + + // Use Mocha's 'before' hook to set up Puppeteer + beforeEach(async function() { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + let page = await browser.newPage(); + await page.setViewport({ width: 1500, height: 1000 }); + insightsPage = new InsightsPage(page); + }); + + // Use Mocha's 'after' hook to close Puppeteer + afterEach(async function() { + await browser.close(); + }); + + it("should not display BP0002 in the UI", async () => { + await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`); + await insightsPage.selectClusters(['cluster1', 'cluster2']); + await insightsPage.selectInsightTypes([constants.InsightType.BP]); + const data = await insightsPage.getTableDataRows() + expect(data.some(item => item.includes("is not namespaced"))).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-not-ui-BP0002.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const helpersHttp = require('./tests/chai-http'); +const InsightsPage = require('./tests/pages/insights-page'); +const constants = require('./tests/pages/constants'); +const puppeteer = require('puppeteer'); +const { enhanceBrowser } = require('./tests/utils/enhance-browser'); +var chai = require('chai'); +var expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 4000); + } else { + done(); + } +}); + +describe("Insights UI", function() { + // UI tests often require a longer timeout. + // So here we force it to a minimum of 30 seconds. 
+ const currentTimeout = this.timeout(); + this.timeout(Math.max(currentTimeout, 30000)); + + let browser; + let insightsPage; + + // Use Mocha's 'before' hook to set up Puppeteer + beforeEach(async function() { + browser = await puppeteer.launch({ + headless: "new", + slowMo: 40, + ignoreHTTPSErrors: true, + args: ['--no-sandbox', '--disable-setuid-sandbox'], + }); + browser = enhanceBrowser(browser, this.currentTest.title); + let page = await browser.newPage(); + await page.setViewport({ width: 1500, height: 1000 }); + insightsPage = new InsightsPage(page); + }); + + // Use Mocha's 'after' hook to close Puppeteer + afterEach(async function() { + await browser.close(); + }); + + it("should not display BP0001 in the UI", async () => { + await insightsPage.navigateTo(`http://${process.env.ENDPOINT_GLOO_MESH_UI}/insights`); + await insightsPage.selectClusters(['cluster1', 'cluster2']); + await insightsPage.selectInsightTypes([constants.InsightType.BP]); + const data = await insightsPage.getTableDataRows() + expect(data.some(item => item.includes("is not namespaced"))).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-intro/tests/insight-not-ui-BP0001.test.js.liquid" +timeout --signal=INT 5m mocha ./test.js --timeout 120000 --retries=20 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight CFG0001 has been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*CFG0001.*} 1/; + const match = command.match(regex); + expect(match).to.not.be.null; + }); + + it("Insight CFG0001 has been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "CFG0001" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-config/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + 
it("Insight CFG0001 has not been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*CFG0001.*} 1/; + const match = command.match(regex); + expect(match).to.be.null; + }); + + it("Insight CFG0001 has not been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "CFG0001" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-config/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends delete virtualservice reviews +kubectl --context ${CLUSTER1} -n bookinfo-backends delete destinationrule reviews +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight SEC0008 has been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*SEC0008.*} 1/; + const match = command.match(regex); + expect(match).to.not.be.null; + }); + + it("Insight SEC0008 has been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "SEC0008" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.true; + }); +}); 
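+// The pair of checks above is the insight-metrics pattern this workshop reuses
+// for every insight code: expose the management server's insights port (9094)
+// and the Prometheus port (9090), curl the metrics endpoints from a debug pod,
+// and look for the insight code in the output. As a hedged sketch, the same
+// idea could be factored into a helper like this (hypothetical name, not part
+// of the workshop sources):
+//
+//   function insightFired(code) {
+//     const metrics = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" });
+//     return new RegExp("gloo_mesh_insights{.*" + code + ".*} 1").test(metrics);
+//   }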
+EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-security/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl apply --context ${CLUSTER1} -f - < ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); + +describe("Insight generation", () => { + it("Insight SEC0008 has not been triggered in the source (MGMT)", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc gloo-mesh-mgmt-server -p '{"spec":{"ports": [{"port": 9094,"name":"http-insights"}]}}'` }); + helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh run debug --image=nginx: --context " + process.env.MGMT }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s http://gloo-mesh-mgmt-server.gloo-mesh:9094/metrics" }).replaceAll("'", ""); + const regex = /gloo_mesh_insights{.*SEC0008.*} 1/; + const match = command.match(regex); + expect(match).to.be.null; + }); + + it("Insight SEC0008 has not been triggered in PROMETHEUS", () => { + helpers.getOutputForCommand({ command: `kubectl --context ${process.env.MGMT} -n gloo-mesh patch svc prometheus-server -p '{"spec":{"ports": [{"port": 9090,"name":"http-metrics"}]}}'` }); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh exec debug -- curl -s 'http://prometheus-server.gloo-mesh:9090/api/v1/query?query=gloo_mesh_insights'" }).replaceAll("'", ""); + let result = JSON.parse(command); + let active = false; + result.data.result.forEach(item => { + if(item.metric.code == "SEC0008" && item.value[1] > 0) { + active = true + } + }); + expect(active).to.be.false; + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/apps/bookinfo/insights-security/../insights-intro/tests/insight-metrics.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends delete authorizationpolicy reviews +kubectl --context ${CLUSTER1} -n istio-system delete peerauthentication default +helm upgrade --install istio-base oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/base \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - </istiod \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < + proxy: + clusterDomain: cluster.local + tag: 1.23.0-patch1-solo + multiCluster: + clusterName: cluster1 +profile: ambient +revision: 1-23-0-patch1 +istio_cni: + enabled: true +meshConfig: + accessLogFile: /dev/stdout + defaultConfig: + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + trustDomain: cluster1 +pilot: + enabled: true + env: + PILOT_ENABLE_IP_AUTOALLOCATE: "true" + PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES: "false" + PILOT_SKIP_VALIDATE_TRUST_DOMAIN: "true" +EOF + +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ +--namespace kube-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < + proxy: 1.23.0-patch1-solo +profile: ambient +revision: 
1-23-0-patch1 +cni: + ambient: + dnsCapture: true + excludeNamespaces: + - istio-system + - kube-system +EOF + +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ +--namespace istio-system \ +--kube-context=${CLUSTER1} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < +istioNamespace: istio-system +multiCluster: + clusterName: cluster1 +namespace: istio-system +profile: ambient +proxy: + clusterDomain: cluster.local +tag: 1.23.0-patch1-solo +terminationGracePeriodSeconds: 29 +variant: distroless +EOF + +helm upgrade --install istio-ingressgateway-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - </gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - </base \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - </istiod \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < + proxy: + clusterDomain: cluster.local + tag: 1.23.0-patch1-solo + multiCluster: + clusterName: cluster2 +profile: ambient +revision: 1-23-0-patch1 +istio_cni: + enabled: true +meshConfig: + accessLogFile: /dev/stdout + defaultConfig: + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + trustDomain: cluster2 +pilot: + enabled: true + env: + PILOT_ENABLE_IP_AUTOALLOCATE: "true" + PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES: "false" + PILOT_SKIP_VALIDATE_TRUST_DOMAIN: "true" +EOF + +helm upgrade --install istio-cni oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/cni \ +--namespace kube-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < + proxy: 1.23.0-patch1-solo +profile: ambient +revision: 1-23-0-patch1 +cni: + ambient: + dnsCapture: true + excludeNamespaces: + - istio-system + - kube-system +EOF + +helm upgrade --install ztunnel oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/ztunnel \ +--namespace istio-system \ +--kube-context=${CLUSTER2} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < +istioNamespace: istio-system +multiCluster: + clusterName: cluster2 +namespace: istio-system +profile: ambient +proxy: + clusterDomain: cluster.local +tag: 1.23.0-patch1-solo +terminationGracePeriodSeconds: 29 +variant: distroless +EOF + +helm upgrade --install istio-ingressgateway-1-23-0-patch1 oci://us-docker.pkg.dev/gloo-mesh/istio-helm-/gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - </gateway \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} \ +--version 1.23.0-patch1-solo \ +--create-namespace \ +-f - < ./test.js + +const helpers = require('./tests/chai-exec'); + +const chaiExec = require("@jsdevtools/chai-exec"); +const helpersHttp = require('./tests/chai-http'); +const chai = require("chai"); +const expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("Checking Istio installation", function() { + it('istiod pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-system", labels: "app=istiod", instances: 2 })); + it('gateway 
pods are ready in cluster ' + process.env.CLUSTER1, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER1, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 4 })); + it('istiod pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-system", labels: "app=istiod", instances: 2 })); + it('gateway pods are ready in cluster ' + process.env.CLUSTER2, () => helpers.checkDeploymentsWithLabels({ context: process.env.CLUSTER2, namespace: "istio-gateways", labels: "app=istio-ingressgateway", instances: 4 })); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER1, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER1 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); + it("Gateways have an ip attached in cluster " + process.env.CLUSTER2, () => { + let cli = chaiExec("kubectl --context " + process.env.CLUSTER2 + " -n istio-gateways get svc -l app=istio-ingressgateway -o jsonpath='{.items}'"); + cli.stderr.should.be.empty; + let deployments = JSON.parse(cli.stdout.slice(1,-1)); + expect(deployments).to.have.lengthOf(2); + deployments.forEach((deployment) => { + expect(deployment.status.loadBalancer).to.have.property("ingress"); + }); + }); +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-istio-helm/tests/istio-ready.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER1 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER1 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER1, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./default/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const dns = require('dns'); +const chaiHttp = require("chai-http"); +const chai = require("chai"); +const expect = chai.expect; +chai.use(chaiHttp); +const { waitOnFailedTest } = require('./tests/utils'); + +afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())}); + +describe("Address '" + process.env.HOST_GW_CLUSTER2 + "' can be resolved in DNS", () => { + it(process.env.HOST_GW_CLUSTER2 + ' can be resolved', (done) => { + return dns.lookup(process.env.HOST_GW_CLUSTER2, (err, address, family) => { + expect(address).to.be.an.ip; + done(); + }); + }); +}); +EOF +echo "executing test ./default/tests/can-resolve.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl 
--context ${CLUSTER1} get ns -l istio.io/rev=1-23 -o json | jq -r '.items[].metadata.name' | while read ns; do + kubectl --context ${CLUSTER1} label ns ${ns} istio.io/rev=1-23-0-patch1 --overwrite +done +kubectl --context ${CLUSTER2} get ns -l istio.io/rev=1-23 -o json | jq -r '.items[].metadata.name' | while read ns; do + kubectl --context ${CLUSTER2} label ns ${ns} istio.io/rev=1-23-0-patch1 --overwrite +done +kubectl --context ${CLUSTER1} -n httpbin patch deploy in-mesh --patch "{\"spec\": {\"template\": {\"metadata\": {\"labels\": {\"istio.io/rev\": \"1-23-0-patch1\" }}}}}" +kubectl --context ${CLUSTER1} -n clients patch deploy in-mesh-with-sidecar --patch "{\"spec\": {\"template\": {\"metadata\": {\"labels\": {\"istio.io/rev\": \"1-23-0-patch1\" }}}}}" +kubectl --context ${CLUSTER1} -n httpbin rollout status deploy in-mesh +curl -k "https:///productpage" -I +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is accessible", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/helm-migrate-workloads-to-revision/../deploy-istio-helm/tests/productpage-available.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n istio-gateways patch svc istio-ingressgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]' +kubectl --context ${CLUSTER1} -n istio-gateways patch svc istio-eastwestgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]' +kubectl --context ${CLUSTER2} -n istio-gateways patch svc istio-ingressgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]' +kubectl --context ${CLUSTER2} -n istio-gateways patch svc istio-eastwestgateway --type=json --patch '[{"op": "remove", "path": "/spec/selector/revision"}]' +curl -k "https:///productpage" -I +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is accessible", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/helm-migrate-workloads-to-revision/../deploy-istio-helm/tests/productpage-available.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const helpers = require('./tests/chai-http'); + +describe("productpage is accessible", () => { + it('/productpage is available in cluster1', () => helpers.checkURL({ host: `https://cluster1-bookinfo.example.com`, path: '/productpage', retCode: 200 })); +}) + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/helm-migrate-workloads-to-revision/../deploy-istio-helm/tests/productpage-available.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +cat <<'EOF' > ./test.js +const chaiExec = require("@jsdevtools/chai-exec"); +const chai = require("chai"); +var expect = chai.expect; + +afterEach(function (done) { + if (this.currentTest.currentRetry() 
> 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("istio in place upgrades", function() { + const cluster1 = process.env.CLUSTER1; + it("should upgrade waypoints", () => { + let cli = chaiExec(`sh -c "istioctl --context ${cluster1} ps | grep waypoint"`); + expect(cli.stdout).to.contain("1.23.0-patch1"); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/helm-migrate-workloads-to-revision/tests/waypoint-upgraded.test.js.liquid" +timeout --signal=INT 1m mocha ./test.js --timeout 10000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +helm uninstall istio-ingressgateway-1-23 \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} + +helm uninstall istio-eastwestgateway-1-23 \ +--namespace istio-gateways \ +--kube-context=${CLUSTER1} + +helm uninstall istio-ingressgateway-1-23 \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} + +helm uninstall istio-eastwestgateway-1-23 \ +--namespace istio-gateways \ +--kube-context=${CLUSTER2} +kubectl --context ${CLUSTER1} -n istio-system get pods +kubectl --context ${CLUSTER2} -n istio-system get pods +kubectl --context ${CLUSTER1} -n istio-gateways get pods +kubectl --context ${CLUSTER2} -n istio-gateways get pods +ATTEMPTS=1 +until [[ $(kubectl --context ${CLUSTER1} -n istio-gateways get pods -l "istio.io/rev=1-23" -o json | jq '.items | length') -eq 0 ]] || [ $ATTEMPTS -gt 120 ]; do + printf "." + ATTEMPTS=$((ATTEMPTS + 1)) + sleep 1 +done +[ $ATTEMPTS -le 120 ] || kubectl --context ${CLUSTER1} -n istio-gateways get pods -l "istio.io/rev=1-23" + +ATTEMPTS=1 +until [[ $(kubectl --context ${CLUSTER2} -n istio-gateways get pods -l "istio.io/rev=1-23" -o json | jq '.items | length') -eq 0 ]] || [ $ATTEMPTS -gt 60 ]; do + printf "." + ATTEMPTS=$((ATTEMPTS + 1)) + sleep 1 +done +[ $ATTEMPTS -le 60 ] || kubectl --context ${CLUSTER2} -n istio-gateways get pods -l "istio.io/rev=1-23" +helm uninstall istiod-1-23 \ +--namespace istio-system \ +--kube-context=${CLUSTER1} + +helm uninstall istiod-1-23 \ +--namespace istio-system \ +--kube-context=${CLUSTER2} +ATTEMPTS=1 +until [[ $(kubectl --context ${CLUSTER1} -n istio-system get pods -l "istio.io/rev=1-23" -o json | jq '.items | length') -eq 0 ]] || [ $ATTEMPTS -gt 120 ]; do + printf "." + ATTEMPTS=$((ATTEMPTS + 1)) + sleep 1 +done +[ $ATTEMPTS -le 120 ] || kubectl --context ${CLUSTER1} -n istio-system get pods -l "istio.io/rev=1-23" +ATTEMPTS=1 +until [[ $(kubectl --context ${CLUSTER2} -n istio-system get pods -l "istio.io/rev=1-23" -o json | jq '.items | length') -eq 0 ]] || [ $ATTEMPTS -gt 60 ]; do + printf "." 
+  ATTEMPTS=$((ATTEMPTS + 1))
+  sleep 1
+done
+[ $ATTEMPTS -le 60 ] || kubectl --context ${CLUSTER2} -n istio-system get pods -l "istio.io/rev=1-23"
+kubectl --context ${CLUSTER1} -n istio-system get pods && kubectl --context ${CLUSTER1} -n istio-gateways get pods
+cat <<'EOF' > ./test.js
+const chaiExec = require("@jsdevtools/chai-exec");
+var chai = require('chai');
+var expect = chai.expect;
+chai.use(chaiExec);
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 1000);
+  } else {
+    done();
+  }
+});
+describe("Old Istio version should be uninstalled", () => {
+  it("Pods aren't running anymore in CLUSTER1, namespace istio-system", () => {
+    let cli = chaiExec('kubectl --context ' + process.env.CLUSTER1 + ' -n istio-system get pods -l "istio.io/rev=' + process.env.OLD_REVISION +'" -o json');
+    expect(cli).to.exit.with.code(0);
+    expect(JSON.parse(cli.stdout).items).to.have.lengthOf(0);
+  });
+  it("Pods aren't running anymore in CLUSTER1, namespace istio-gateways", () => {
+    let cli = chaiExec('kubectl --context ' + process.env.CLUSTER1 + ' -n istio-gateways get pods -l "istio.io/rev=' + process.env.OLD_REVISION +'" -o json');
+    expect(cli).to.exit.with.code(0);
+    expect(JSON.parse(cli.stdout).items).to.have.lengthOf(0);
+  });
+  it("Pods aren't running anymore in CLUSTER2, namespace istio-system", () => {
+    let cli = chaiExec('kubectl --context ' + process.env.CLUSTER2 + ' -n istio-system get pods -l "istio.io/rev=' + process.env.OLD_REVISION +'" -o json');
+    expect(cli).to.exit.with.code(0);
+    expect(JSON.parse(cli.stdout).items).to.have.lengthOf(0);
+  });
+  it("Pods aren't running anymore in CLUSTER2, namespace istio-gateways", () => {
+    let cli = chaiExec('kubectl --context ' + process.env.CLUSTER2 + ' -n istio-gateways get pods -l "istio.io/rev=' + process.env.OLD_REVISION +'" -o json');
+    expect(cli).to.exit.with.code(0);
+    expect(JSON.parse(cli.stdout).items).to.have.lengthOf(0);
+  });
+});
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/helm-cleanup-revision/tests/previous-version-uninstalled.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl --context ${CLUSTER1} apply -f - < ./test.js
+const chaiExec = require("@jsdevtools/chai-exec");
+var chai = require('chai');
+var expect = chai.expect;
+chai.use(chaiExec);
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 1000);
+  } else {
+    done();
+  }
+});
+
+describe("egress traffic", function() {
+  const cluster = process.env.CLUSTER1
+
+  it(`virtual service should add custom header`, function() {
+    let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -s httpbin.org/get`;
+    let cli = chaiExec(command);
+    expect(cli.output.toLowerCase()).to.contain('my-added-header');
+  });
+
+  it(`destination rule should route to https`, function() {
+    let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -s httpbin.org/get`;
+    let cli = chaiExec(command);
+    expect(cli.output.toLowerCase()).to.contain('https://httpbin.org/get');
+  });
+
+  it(`other types of traffic (HTTP methods) should be rejected`, function() {
+    let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -s -I httpbin.org/get`;
+    let cli = chaiExec(command);
+    expect(cli.output).to.contain('403 Forbidden');
+  });
+});
+
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/waypoint-egress/tests/validate-egress-traffic.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 20000 --retries=60 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl --context ${CLUSTER1} delete authorizationpolicy httpbin -n egress
+kubectl --context ${CLUSTER1} apply -f - < ./test.js
+const chaiExec = require("@jsdevtools/chai-exec");
+var chai = require('chai');
+var expect = chai.expect;
+chai.use(chaiExec);
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 1000);
+  } else {
+    done();
+  }
+});
+
+describe("waypoint for service when ns is labeled", function() {
+  const cluster = process.env.CLUSTER1
+
+  it(`should redirect traffic for all services to the waypoint`, () => {
+    let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -v "http://ratings.bookinfo-backends:9080/ratings/0"`;
+    let cli = chaiExec(command);
+    expect(cli).to.exit.with.code(0);
+    expect(cli).output.to.contain('istio-envoy');
+
+    command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -v "http://reviews.bookinfo-backends:9080/reviews/0"`;
+    cli = chaiExec(command);
+    expect(cli).to.exit.with.code(0);
+    expect(cli).output.to.contain('istio-envoy');
+  });
+});
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/waypoint-deployment-options/tests/validate-waypoint-for-service-ns.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 20000 --retries=10 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl --context ${CLUSTER1} apply -f - < ./test.js
+const chaiExec = require("@jsdevtools/chai-exec");
+var chai = require('chai');
+var expect = chai.expect;
+chai.use(chaiExec);
+
+afterEach(function (done) {
+  if (this.currentTest.currentRetry() > 0) {
+    process.stdout.write(".");
+    setTimeout(done, 1000);
+  } else {
+    done();
+  }
+});
+
+describe("service labeling to use a waypoint takes precedence over namespace labeling", function() {
+  const cluster = process.env.CLUSTER1
+
+  it(`should redirect traffic of the labeled service through the waypoint and enforce the policy`, () => {
+    let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -v "http://ratings.bookinfo-backends:9080/ratings/0"`;
+    let cli = chaiExec(command);
+    expect(cli).to.exit.with.code(0);
+    expect(cli).output.to.contain('Forbidden');
+  });
+
+  it(`should still redirect traffic of non-labeled services through the waypoint configured for the namespace`, () => {
+    let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -v "http://reviews.bookinfo-backends:9080/reviews/0"`;
+    let cli = chaiExec(command);
+    expect(cli).to.exit.with.code(0);
+    expect(cli).output.to.contain('istio-envoy');
+  });
+});
+
+EOF
+echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/waypoint-deployment-options/tests/validate-waypoint-for-specific-service.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 20000 --retries=10 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+kubectl --context ${CLUSTER1} apply -f - < ./test.js
+const chaiExec = require("@jsdevtools/chai-exec");
+var chai = require('chai');
+var expect = chai.expect;
+chai.use(chaiExec);
+
+afterEach(function (done) {
+  if 
(this.currentTest.currentRetry() > 0) { + process.stdout.write("."); + setTimeout(done, 1000); + } else { + done(); + } +}); + +describe("waypoint for workloads when pod is labeled", function() { + const cluster = process.env.CLUSTER1 + + it(`should redirect traffic to waypoint`, () => { + let commandGetIP = `kubectl --context ${cluster} -n bookinfo-backends get pod -l app=ratings -o jsonpath='{.items[0].status.podIP}'`; + let cli = chaiExec(commandGetIP); + let podIP = cli.output.replace(/'/g, ''); + + let command = `kubectl --context ${cluster} -n clients exec deploy/in-ambient -- curl -v "http://${podIP}:9080/ratings/0"`; + cli = chaiExec(command); + + expect(cli).to.exit.with.code(0); + expect(cli).output.to.contain('istio-envoy'); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/ambient/waypoint-deployment-options/tests/validate-waypoint-for-workload.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 20000 --retries=10 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +kubectl --context ${CLUSTER1} -n bookinfo-backends label pod -l app=ratings istio.io/use-waypoint- +kubectl --context ${CLUSTER1} -n bookinfo-backends label svc ratings istio.io/use-waypoint=ratings-waypoint +kubectl --context ${CLUSTER1} -n bookinfo-backends delete authorizationpolicy deny-traffic-from-clients-ns +kubectl --context ${CLUSTER1} -n bookinfo-backends delete gateway waypoint ratings-waypoint ratings-workload-waypoint diff --git a/gloo-mesh/core/2-7/ambient/scripts/configure-domain-rewrite.sh b/gloo-mesh/core/2-7/ambient/scripts/configure-domain-rewrite.sh index be6dbd6d8b..d6e684c9da 100755 --- a/gloo-mesh/core/2-7/ambient/scripts/configure-domain-rewrite.sh +++ b/gloo-mesh/core/2-7/ambient/scripts/configure-domain-rewrite.sh @@ -90,4 +90,4 @@ done # If the loop exits, it means the check failed consistently for 1 minute echo "DNS rewrite rule verification failed." 
-exit 1 +exit 1 \ No newline at end of file diff --git a/gloo-mesh/core/2-7/ambient/scripts/register-domain.sh b/gloo-mesh/core/2-7/ambient/scripts/register-domain.sh index f9084487e8..1cb84cd86a 100755 --- a/gloo-mesh/core/2-7/ambient/scripts/register-domain.sh +++ b/gloo-mesh/core/2-7/ambient/scripts/register-domain.sh @@ -14,7 +14,9 @@ hosts_file="/etc/hosts" # Function to check if the input is a valid IP address is_ip() { if [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - return 0 # 0 = true + return 0 # 0 = true - valid IPv4 address + elif [[ $1 =~ ^[0-9a-f]+[:]+[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9]*$ ]]; then + return 0 # 0 = true - valid IPv6 address else return 1 # 1 = false fi @@ -38,14 +40,15 @@ else fi # Check if the entry already exists -if grep -q "$hostname" "$hosts_file"; then +if grep -q "$hostname\$" "$hosts_file"; then # Update the existing entry with the new IP tempfile=$(mktemp) - sed "s/^.*$hostname/$new_ip $hostname/" "$hosts_file" > "$tempfile" + sed "s/^.*$hostname\$/$new_ip $hostname/" "$hosts_file" > "$tempfile" sudo cp "$tempfile" "$hosts_file" + rm "$tempfile" echo "Updated $hostname in $hosts_file with new IP: $new_ip" else # Add a new entry if it doesn't exist echo "$new_ip $hostname" | sudo tee -a "$hosts_file" > /dev/null echo "Added $hostname to $hosts_file with IP: $new_ip" -fi \ No newline at end of file +fi diff --git a/gloo-mesh/core/2-7/ambient/tests/chai-exec.js b/gloo-mesh/core/2-7/ambient/tests/chai-exec.js index 67ba62f095..020262437f 100644 --- a/gloo-mesh/core/2-7/ambient/tests/chai-exec.js +++ b/gloo-mesh/core/2-7/ambient/tests/chai-exec.js @@ -139,7 +139,11 @@ global = { }, k8sObjectIsPresent: ({ context, namespace, k8sType, k8sObj }) => { - let command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + // covers both namespace scoped and cluster scoped objects + let command = "kubectl --context " + context + " get " + k8sType + " " + k8sObj + " -o name"; + if (namespace) { + command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name"; + } debugLog(`Executing command: ${command}`); let cli = chaiExec(command); @@ -176,7 +180,6 @@ global = { debugLog(`Command output (stdout): ${cli.stdout}`); return cli.stdout; }, - curlInPod: ({ curlCommand, podName, namespace }) => { debugLog(`Executing curl command: ${curlCommand} on pod: ${podName} in namespace: ${namespace}`); const cli = chaiExec(curlCommand); diff --git a/gloo-mesh/core/2-7/ambient/tests/chai-http.js b/gloo-mesh/core/2-7/ambient/tests/chai-http.js index 67f43db003..92bf579690 100644 --- a/gloo-mesh/core/2-7/ambient/tests/chai-http.js +++ b/gloo-mesh/core/2-7/ambient/tests/chai-http.js @@ -25,7 +25,30 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); + }); + }, + + checkURLWithIP: ({ ip, host, protocol = "http", path = "", headers = [], certFile = '', keyFile = '', retCode }) => { + debugLog(`Checking URL with IP: ${ip}, Host: ${host}, Path: ${path} with expected return code: ${retCode}`); + + let cert = certFile ? fs.readFileSync(certFile) : ''; + let key = keyFile ? 
fs.readFileSync(keyFile) : ''; + + let url = `${protocol}://${ip}`; + + // Use chai-http to make a request to the IP address, but set the Host header + let request = chai.request(url).head(path).redirects(0).cert(cert).key(key).set('Host', host); + + debugLog(`Setting headers: ${JSON.stringify(headers)}`); + headers.forEach(header => request.set(header.key, header.value)); + + return request + .send() + .then(async function (res) { + debugLog(`Response status code: ${res.status}`); + debugLog(`Response ${JSON.stringify(res)}`); + expect(res).to.have.property('status', retCode); }); }, @@ -124,7 +147,7 @@ global = { .send() .then(async function (res) { debugLog(`Response status code: ${res.status}`); - expect(res).to.have.status(retCode); + expect(res).to.have.property('status', retCode); }); } }; diff --git a/gloo-mesh/core/2-7/ambient/tests/proxies-changes.test.js.liquid b/gloo-mesh/core/2-7/ambient/tests/proxies-changes.test.js.liquid new file mode 100644 index 0000000000..1934ea13b6 --- /dev/null +++ b/gloo-mesh/core/2-7/ambient/tests/proxies-changes.test.js.liquid @@ -0,0 +1,58 @@ +{%- assign version_1_18_or_after = "1.18.0" | minimumGlooGatewayVersion %} +const { execSync } = require('child_process'); +const { expect } = require('chai'); +const { diff } = require('jest-diff'); + +function delay(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +describe('Gloo snapshot stability test', function() { + let contextName = process.env.{{ context | default: "CLUSTER1" }}; + let delaySeconds = {{ delay | default: 5 }}; + + let firstSnapshot; + + it('should retrieve initial snapshot', function() { + const output = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + + try { + firstSnapshot = JSON.parse(output); + } catch (err) { + throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message); + } + expect(firstSnapshot).to.be.an('object'); + }); + + it('should not change after the given delay', async function() { + await delay(delaySeconds * 1000); + + let secondSnapshot; + try { + const output2 = execSync( + `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:{% if version_1_18_or_after %}9095{% else %}9091{% endif %}/snapshots/proxies -q`, + { encoding: 'utf8' } + ); + secondSnapshot = JSON.parse(output2); + } catch (err) { + throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message); + } + + const firstJson = JSON.stringify(firstSnapshot, null, 2); + const secondJson = JSON.stringify(secondSnapshot, null, 2); + + // Show only 2 lines of context around each change + const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false }); + + if (! diffOutput.includes("Compared values have no visual difference.")) { + console.error('Differences found between snapshots:\n' + diffOutput); + throw new Error('Snapshots differ after the delay.'); + } else { + console.log('No differences found. 
The snapshots are stable.'); + } + }); +}); + From e685ae9263e70f620b2471424a920a556ece63ba Mon Sep 17 00:00:00 2001 From: soloio-bot <> Date: Fri, 3 Jan 2025 17:23:40 +0000 Subject: [PATCH 06/34] Update from https://github.com/solo-io/procgen/commit/b0fcf769a8ed8b42653f7aeb190716fb7add3277 --- .../2-6/ambient-interoperability/README.md | 45 +- .../deploy-kind-clusters/deploy-cluster1.sh | 235 +++ .../2-6/ambient-interoperability/package.json | 44 + .../core/2-6/ambient-interoperability/run.sh | 1349 +++++++++++++++++ .../scripts/configure-domain-rewrite.sh | 2 +- .../scripts/register-domain.sh | 11 +- .../tests/chai-exec.js | 7 +- .../tests/chai-http.js | 27 +- .../tests/proxies-changes.test.js.liquid | 58 + 9 files changed, 1739 insertions(+), 39 deletions(-) create mode 100644 gloo-mesh/core/2-6/ambient-interoperability/data/steps/deploy-kind-clusters/deploy-cluster1.sh create mode 100644 gloo-mesh/core/2-6/ambient-interoperability/package.json create mode 100644 gloo-mesh/core/2-6/ambient-interoperability/run.sh create mode 100644 gloo-mesh/core/2-6/ambient-interoperability/tests/proxies-changes.test.js.liquid diff --git a/gloo-mesh/core/2-6/ambient-interoperability/README.md b/gloo-mesh/core/2-6/ambient-interoperability/README.md index 84a621c817..05909b70da 100644 --- a/gloo-mesh/core/2-6/ambient-interoperability/README.md +++ b/gloo-mesh/core/2-6/ambient-interoperability/README.md @@ -15,7 +15,7 @@ source ./scripts/assert.sh ## Table of Contents * [Introduction](#introduction) -* [Lab 1 - Deploy a KinD cluster](#lab-1---deploy-a-kind-cluster-) +* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-) * [Lab 2 - Deploy and register Gloo Mesh](#lab-2---deploy-and-register-gloo-mesh-) * [Lab 3 - Deploy Istio using Helm](#lab-3---deploy-istio-using-helm-) * [Lab 4 - Deploy the Bookinfo demo app](#lab-4---deploy-the-bookinfo-demo-app-) @@ -72,7 +72,7 @@ You can find more information about Gloo Mesh Core in the official documentation -## Lab 1 - Deploy a KinD cluster +## Lab 1 - Deploy KinD Cluster(s) Clone this repository and go to the directory where this `README.md` file is. @@ -84,12 +84,11 @@ export MGMT=cluster1 export CLUSTER1=cluster1 ``` -Run the following commands to deploy a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io/): +Deploy the KinD clusters: ```bash -./scripts/deploy-multi-with-calico.sh 1 cluster1 us-west us-west-1 +bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh ``` - Then run the following commands to wait for all the Pods to be ready: ```bash @@ -98,38 +97,20 @@ Then run the following commands to wait for all the Pods to be ready: **Note:** If you run the `check.sh` script immediately after the `deploy.sh` script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again. 
-Once the `check.sh` script completes, when you execute the `kubectl get pods -A` command, you should see the following: - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system calico-kube-controllers-59d85c5c84-sbk4k 1/1 Running 0 4h26m -kube-system calico-node-przxs 1/1 Running 0 4h26m -kube-system coredns-6955765f44-ln8f5 1/1 Running 0 4h26m -kube-system coredns-6955765f44-s7xxx 1/1 Running 0 4h26m -kube-system etcd-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-apiserver-cluster1-control-plane 1/1 Running 0 4h27m -kube-system kube-controller-manager-cluster1-control-plane1/1 Running 0 4h27m -kube-system kube-proxy-ksvzw 1/1 Running 0 4h26m -kube-system kube-scheduler-cluster1-control-plane 1/1 Running 0 4h27m -local-path-storage local-path-provisioner-58f6947c7-lfmdx 1/1 Running 0 4h26m -metallb-system controller-5c9894b5cd-cn9x2 1/1 Running 0 4h26m -metallb-system speaker-d7jkp 1/1 Running 0 4h26m -``` - -**Note:** The CNI pods might be different, depending on which CNI you have deployed. - +Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state. @@ -175,6 +156,7 @@ EOF echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/environment-variables.test.js.liquid" timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } --> + Run the following commands to deploy the Gloo Mesh management plane: ```bash @@ -236,6 +218,10 @@ EOF kubectl --context ${MGMT} -n gloo-mesh rollout status deploy/gloo-mesh-mgmt-server ``` +Set the endpoint for the Gloo Mesh UI: +```bash +export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090 +``` + + + +
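+As a quick sanity check (a sketch; it assumes the `gloo-mesh-ui` service has been assigned a LoadBalancer address and answers plain HTTP on port 8090), confirm the variable resolves and the endpoint responds:
+
+```bash
+echo "Gloo Mesh UI endpoint: ${ENDPOINT_GLOO_MESH_UI}"
+curl -s -o /dev/null -w "%{http_code}\n" "http://${ENDPOINT_GLOO_MESH_UI}"
+```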
+
+
+# Gloo Mesh Enterprise (2.7.0-beta1-2025-01-02-main-8f7e13ee92)
+
+
+
+## Table of Contents
+* [Introduction](#introduction)
+* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-)
+* [Lab 2 - Deploy and register Gloo Mesh](#lab-2---deploy-and-register-gloo-mesh-)
+* [Lab 3 - Deploy Httpbin to cluster1](#lab-3---deploy-httpbin-to-cluster1-)
+* [Lab 4 - Deploy Httpbin to cluster2](#lab-4---deploy-httpbin-to-cluster2-)
+* [Lab 5 - Deploy Gloo Gateway to cluster1](#lab-5---deploy-gloo-gateway-to-cluster1-)
+* [Lab 6 - Deploy Gloo Gateway to cluster2](#lab-6---deploy-gloo-gateway-to-cluster2-)
+* [Lab 7 - Distributed configs](#lab-7---distributed-configs-)
+
+
+
+## Introduction
+
+Gloo Mesh Enterprise is a distribution of the [Istio](https://istio.io/) service mesh that is hardened for production support across multicluster hybrid clusters and service meshes.
+With Gloo Mesh Enterprise, you get an extensible, open-source based set of API tools to connect and manage your services across multiple clusters and service meshes.
+It includes n-4 Istio version support with security patches to address Common Vulnerabilities and Exposures (CVEs), as well as special builds to meet regulatory standards such as Federal Information Processing Standards (FIPS).
+
+The Gloo Mesh API simplifies the complexity of your service mesh by installing custom resource definitions (CRDs) that you configure.
+Then, Gloo Mesh translates these CRDs into Istio resources across your environment, and provides visibility across all of the resources and traffic.
+Enterprise features include multitenancy, global failover and routing, observability, and east-west rate limiting and policy enforcement through authorization and authentication plug-ins.
+
+### Gloo Mesh Enterprise overview
+
+Gloo Mesh Enterprise provides many unique features, including:
+
+* Upstream-first approach to feature development
+* Installation, upgrade, and management across clusters and service meshes
+* Advanced features for security, traffic routing, transformations, observability, and more
+* End-to-end Istio support and CVE security patching for n-4 versions
+* Specialty builds for distroless and FIPS compliance
+* 24x7 production support and one-hour Severity 1 SLA
+* Portal modules to extend functionality
+* Workspaces for simplified multi-tenancy
+* Zero-trust architecture for both north-south ingress and east-west service traffic
+* Single pane of glass for operational management of Istio, including global observability
+
+Gloo Mesh Enterprise graph
+
+### Want to learn more about Gloo Mesh Enterprise?
+
+You can find more information about Gloo Mesh Enterprise in the official documentation.
+
+
+
+
+## Lab 1 - Deploy KinD Cluster(s)
+
+
+Clone this repository and go to the directory where this `README.md` file is.
+
+Set the context environment variables:
+
+```bash
+export MGMT=mgmt
+export CLUSTER1=cluster1
+export CLUSTER2=cluster2
+```
+
+Deploy the KinD clusters:
+
+```bash
+bash ./data/steps/deploy-kind-clusters/deploy-mgmt.sh
+bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh
+bash ./data/steps/deploy-kind-clusters/deploy-cluster2.sh
+```
+
+Then run the following commands to wait for all the Pods to be ready:
+
+```bash
+./scripts/check.sh mgmt
+./scripts/check.sh cluster1
+./scripts/check.sh cluster2
+```
+
+**Note:** If you run the `check.sh` script immediately after the `deploy.sh` script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again.
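+If you prefer to script the retry instead of re-running the command by hand, a small loop works (a sketch; it assumes `check.sh` exits with a non-zero status until the cluster is ready):
+
+```bash
+for ctx in mgmt cluster1 cluster2; do
+  until ./scripts/check.sh ${ctx}; do sleep 5; done
+done
+```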
+
+Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state.
+
+
+
+
+
+## Lab 2 - Deploy and register Gloo Mesh
+[VIDEO LINK](https://youtu.be/djfFiepK4GY "Video Link")
+
+
+Before we get started, let's install the `meshctl` CLI:
+
+```bash
+export GLOO_MESH_VERSION=v2.7.0-beta1-2025-01-02-main-8f7e13ee92
+mkdir -p $HOME/.gloo-mesh/bin
+curl "https://storage.googleapis.com/gloo-platform-dev/meshctl/$GLOO_MESH_VERSION/meshctl-$(uname | tr '[:upper:]' '[:lower:]')-amd64" > $HOME/.gloo-mesh/bin/meshctl
+chmod +x $HOME/.gloo-mesh/bin/meshctl
+export PATH=$HOME/.gloo-mesh/bin:$PATH
+```
+
+
+Install the Kubernetes Gateway API CRDs in the management cluster, then install Gloo Gateway on the first cluster.
+
+```bash
+kubectl --context ${MGMT} apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.0/standard-install.yaml
+
+helm upgrade -i -n gloo-system \
+  --repo https://storage.googleapis.com/solo-public-helm \
+  gloo-gateway gloo \
+  --create-namespace \
+  --version 1.17.16 \
+  --kube-context $CLUSTER1 \
+  -f -<
+
+Then, you need to set the environment variables that tell the Gloo Mesh agents how to communicate with the management plane:
+
+
+
+```bash
+export ENDPOINT_GLOO_MESH=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-mgmt-server -o jsonpath='{.status.loadBalancer.ingress[0].*}'):9900
+export HOST_GLOO_MESH=$(echo ${ENDPOINT_GLOO_MESH%:*})
+export ENDPOINT_TELEMETRY_GATEWAY=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-telemetry-gateway -o jsonpath='{.status.loadBalancer.ingress[0].*}'):4317
+export ENDPOINT_GLOO_MESH_UI=$(kubectl --context ${MGMT} -n gloo-mesh get svc gloo-mesh-ui -o jsonpath='{.status.loadBalancer.ingress[0].*}'):8090
+```
+
+Check that the variables have correct values:
+```
+echo $HOST_GLOO_MESH
+echo $ENDPOINT_GLOO_MESH
+```
+
+
+Finally, you need to register the cluster(s).
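+Before you do, you can verify that the relay endpoint exposed by the management plane is reachable (a sketch; it assumes `nc` from netcat is available and that the LoadBalancer address is routable from your machine):
+
+```bash
+nc -zv ${HOST_GLOO_MESH} 9900
+```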
+ + +Here is how you register the first one: + +```bash +kubectl apply --context ${MGMT} -f - < ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER1} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER1} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform-dev/platform-charts/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.7.0-beta1-2025-01-02-main-8f7e13ee92 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform-dev/platform-charts/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER1} \ + --version 2.7.0-beta1-2025-01-02-main-8f7e13ee92 \ + -f -< ca.crt +kubectl create secret generic relay-root-tls-secret -n gloo-mesh --context ${CLUSTER2} --from-file ca.crt=ca.crt +rm ca.crt + +kubectl get secret relay-identity-token-secret -n gloo-mesh --context ${MGMT} -o jsonpath='{.data.token}' | base64 -d > token +kubectl create secret generic relay-identity-token-secret -n gloo-mesh --context ${CLUSTER2} --from-file token=token +rm token + +helm upgrade --install gloo-platform-crds gloo-platform-crds \ + --repo https://storage.googleapis.com/gloo-platform-dev/platform-charts/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.7.0-beta1-2025-01-02-main-8f7e13ee92 + +helm upgrade --install gloo-platform gloo-platform \ + --repo https://storage.googleapis.com/gloo-platform-dev/platform-charts/helm-charts \ + --namespace gloo-mesh \ + --kube-context ${CLUSTER2} \ + --version 2.7.0-beta1-2025-01-02-main-8f7e13ee92 \ + -f -< ./test.js +var chai = require('chai'); +var expect = chai.expect; +const helpers = require('./tests/chai-exec'); +describe("Cluster registration", () => { + it("cluster1 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster1"); + }); + it("cluster2 is registered", () => { + podName = helpers.getOutputForCommand({ command: "kubectl -n gloo-mesh get pods -l app=gloo-mesh-mgmt-server -o jsonpath='{.items[0].metadata.name}' --context " + process.env.MGMT }).replaceAll("'", ""); + command = helpers.getOutputForCommand({ command: "kubectl --context " + process.env.MGMT + " -n gloo-mesh debug -q -i " + podName + " --image=curlimages/curl -- curl -s http://localhost:9091/metrics" }).replaceAll("'", ""); + expect(command).to.contain("cluster2"); + }); +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/deploy-and-register-gloo-mesh/tests/cluster-registration.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +--> + + + + +## Lab 3 - Deploy Httpbin to cluster1 + + +We're going to deploy the httpbin application to demonstrate several features of 
Gloo Gateway. + +You can find more information about this application [here](http://httpbin.org/). + +Run the following commands to deploy the httpbin app twice (`httpbin1` and `httpbin2`). + +```bash +kubectl --context ${CLUSTER1} create ns httpbin +kubectl apply --context ${CLUSTER1} -f - < +```shell +kubectl --context ${CLUSTER1} -n httpbin get pods +``` + +Here is the expected output when both Pods are ready: + +```,nocopy +NAME READY STATUS RESTARTS AGE +httpbin1-7fdbf6498-ms7qt 1/1 Running 0 94s +httpbin2-655777b846-6nrms 1/1 Running 0 93s +``` + + + + + + +## Lab 4 - Deploy Httpbin to cluster2 + + +We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway. + +You can find more information about this application [here](http://httpbin.org/). + +Run the following commands to deploy the httpbin app twice (`httpbin1` and `httpbin2`). + +```bash +kubectl --context ${CLUSTER2} create ns httpbin +kubectl apply --context ${CLUSTER2} -f - < +```shell +kubectl --context ${CLUSTER2} -n httpbin get pods +``` + +Here is the expected output when both Pods are ready: + +```,nocopy +NAME READY STATUS RESTARTS AGE +httpbin1-7fdbf6498-ms7qt 1/1 Running 0 94s +httpbin2-655777b846-6nrms 1/1 Running 0 93s +``` + + + + + + +## Lab 5 - Deploy Gloo Gateway to cluster1 + +You can deploy Gloo Gateway with the `glooctl` CLI or declaratively using Helm. + +We're going to use the Helm option. + +Install the Kubernetes Gateway API CRDs as they do not come installed by default on most Kubernetes clusters. + +```bash +kubectl --context $CLUSTER1 apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.0/standard-install.yaml +``` + +Next, install Gloo Gateway. This command installs the Gloo Gateway control plane into the namespace `gloo-system`. + +```bash +helm repo add solo-public-helm https://storage.googleapis.com/solo-public-helm + +helm repo update + +helm upgrade -i -n gloo-system \ + gloo-gateway solo-public-helm/gloo \ + --create-namespace \ + --version 1.17.16 \ + --kube-context $CLUSTER1 \ + -f -< +```bash +kubectl --context $CLUSTER1 -n gloo-system get pods +``` + +Here is the expected output: + +```,nocopy +NAME READY STATUS RESTARTS AGE +gateway-certgen-h5z9t 0/1 Completed 0 52s +gateway-proxy-7474c7bf9b-dsvtz 3/3 Running 0 47s +gloo-6b5575f9fc-8f2zs 1/1 Running 0 47s +gloo-resource-rollout-check-4bt5g 0/1 Completed 0 47s +gloo-resource-rollout-h5jf4 0/1 Completed 0 47s +``` + + + + + +## Lab 6 - Deploy Gloo Gateway to cluster2 + +You can deploy Gloo Gateway with the `glooctl` CLI or declaratively using Helm. + +We're going to use the Helm option. + +Install the Kubernetes Gateway API CRDs as they do not come installed by default on most Kubernetes clusters. + +```bash +kubectl --context $CLUSTER2 apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.0/standard-install.yaml +``` + +Next, install Gloo Gateway. This command installs the Gloo Gateway control plane into the namespace `gloo-system`. 
+ +```bash +helm repo add solo-public-helm https://storage.googleapis.com/solo-public-helm + +helm repo update + +helm upgrade -i -n gloo-system \ + gloo-gateway solo-public-helm/gloo \ + --create-namespace \ + --version 1.17.16 \ + --kube-context $CLUSTER2 \ + -f -< +```bash +kubectl --context $CLUSTER2 -n gloo-system get pods +``` + +Here is the expected output: + +```,nocopy +NAME READY STATUS RESTARTS AGE +gateway-certgen-h5z9t 0/1 Completed 0 52s +gateway-proxy-7474c7bf9b-dsvtz 3/3 Running 0 47s +gloo-6b5575f9fc-8f2zs 1/1 Running 0 47s +gloo-resource-rollout-check-4bt5g 0/1 Completed 0 47s +gloo-resource-rollout-h5jf4 0/1 Completed 0 47s +``` + + + + + +## Lab 7 - Distributed configs + +In this lab, we will explore the concept of distributed configurations in Gloo Mesh. We will demonstrate how Gloo Mesh enables you to manage configurations centrally from the management cluster while distributing them to the Gateways deployed in registered clusters (cluster1 and cluster2 in this case). + +### Prepare Namespaces + +Before we start distributing configuration, let's create a namespace on all three clusters that will contain the centrally-managed gateway resources: + +```bash +kubectl --context $MGMT create ns gloo-gateway-config +kubectl --context $CLUSTER1 create ns gloo-gateway-config +kubectl --context $CLUSTER2 create ns gloo-gateway-config +``` + +Having a dedicated namespace for these resources on workload clusters allows RBAC to be applied to these resources if needed. + +### Deploy a Centrally Managed GatewayClass + +Next, we will deploy a `GatewayClass` named `centrally-managed` in the management cluster. This deployment will automatically create gateways in the workload clusters for any Gateways that use this class. + +```bash +kubectl apply --context ${MGMT} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("Gateway", () => { + it('should be created in cluster1', () => { + helpers.checkDeploymentHasPod({ context: process.env.CLUSTER1, namespace: "gloo-gateway-config", deployment: "gloo-proxy-generic-gateway-gloo-gateway-config" }); + }) + + it('should be created in cluster2', () => { + helpers.checkDeploymentHasPod({ context: process.env.CLUSTER2, namespace: "gloo-gateway-config", deployment: "gloo-proxy-generic-gateway-gloo-gateway-config" }); + }) +}); +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/distributed-configs/tests/check-gateway.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +--> + +Next apply the HTTPRoute: + +```bash +kubectl apply --context ${MGMT} -f - < ./test.js +const helpers = require('./tests/chai-exec'); + +describe("HTTPRoute", () => { + it('should be propagated to cluster1', () => { + return helpers.genericCommand({ + command: `kubectl --context=${process.env.CLUSTER1} get httproutes.gateway.networking.k8s.io -n gloo-gateway-config`, + responseContains: 'httpbin' + }); + }) + + it('should be propagated to cluster2', () => { + return helpers.genericCommand({ + command: `kubectl --context=${process.env.CLUSTER2} get httproutes.gateway.networking.k8s.io -n gloo-gateway-config`, + responseContains: 'httpbin' + }); + }) +}); + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/distributed-configs/tests/verify-routes-created-in-clusters.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js 
--timeout 120000; exit 1; } +--> + + +### Deploy Child HTTPRoutes + +Now, let's deploy child `HTTPRoute` resources in the `httpbin` namespace on both `cluster1` and `cluster2`. These child routes will define the actual backend service (`httpbin1`) to which traffic will be routed by the parent route. + +```bash +kubectl apply --context ${CLUSTER1} -f - < ./test.js +const httpHelpers = require('./tests/chai-http'); +const execHelpers = require('./tests/chai-exec'); + +describe("httpbin is accessible", () => { + let cluster1 = process.env.CLUSTER1; + let cluster2 = process.env.CLUSTER2; + + let gateway_ip_cluster1 = execHelpers.getOutputForCommand({ command: `kubectl --context ${cluster1} -n gloo-gateway-config get svc gloo-proxy-generic-gateway-gloo-gateway-config -o jsonpath='{.status.loadBalancer.ingress[0].ip}'`}).replaceAll("'", ""); + + let gateway_ip_cluster2 = execHelpers.getOutputForCommand({ command: `kubectl --context ${cluster2} -n gloo-gateway-config get svc gloo-proxy-generic-gateway-gloo-gateway-config -o jsonpath='{.status.loadBalancer.ingress[0].ip}'`}).replaceAll("'", ""); + + it('httpbin is available in cluster1', () => httpHelpers.checkURLWithIP({ ip: gateway_ip_cluster1, host: `httpbin`, path: '/get', retCode: 200 })); + + it('httpbin is available in cluster2', () => httpHelpers.checkURLWithIP({ ip: gateway_ip_cluster2, host: `httpbin`, path: '/get', retCode: 200 })); +}) + +EOF +echo "executing test dist/gloo-mesh-2-0-workshop/build/templates/steps/distributed-configs/tests/check-connectivity-children.test.js.liquid" +timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; } +--> + + + + diff --git a/gloo-mesh/enterprise/2-7/distributed-configs/data/.gitkeep b/gloo-mesh/enterprise/2-7/distributed-configs/data/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gloo-mesh/enterprise/2-7/distributed-configs/data/steps/deploy-kind-clusters/deploy-cluster1.sh b/gloo-mesh/enterprise/2-7/distributed-configs/data/steps/deploy-kind-clusters/deploy-cluster1.sh new file mode 100644 index 0000000000..3fda068282 --- /dev/null +++ b/gloo-mesh/enterprise/2-7/distributed-configs/data/steps/deploy-kind-clusters/deploy-cluster1.sh @@ -0,0 +1,292 @@ +#!/usr/bin/env bash +set -o errexit + +number="2" +name="cluster1" +region="" +zone="" +twodigits=$(printf "%02d\n" $number) + +kindest_node=${KINDEST_NODE} + +if [ -z "$kindest_node" ]; then + export k8s_version="1.28.0" + + [[ ${k8s_version::1} != 'v' ]] && export k8s_version=v${k8s_version} + kindest_node_ver=$(curl --silent "https://registry.hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" \ + | jq -r '.results | .[] | select(.name==env.k8s_version) | .name+"@"+.digest') + + if [ -z "$kindest_node_ver" ]; then + echo "Incorrect Kubernetes version provided: ${k8s_version}." + exit 1 + fi + kindest_node=kindest/node:${kindest_node_ver} +fi +echo "Using KinD image: ${kindest_node}" + +if [ -z "$3" ]; then + case $name in + cluster1) + region=us-west-1 + ;; + cluster2) + region=us-west-2 + ;; + *) + region=us-east-1 + ;; + esac +fi + +if [ -z "$4" ]; then + case $name in + cluster1) + zone=us-west-1a + ;; + cluster2) + zone=us-west-2a + ;; + *) + zone=us-east-1a + ;; + esac +fi + +if hostname -I 2>/dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat </dev/null; then + myip=$(hostname -I | awk '{ print $1 }') +else + myip=$(ipconfig getifaddr en0) +fi + +# Function to determine the next available cluster number +get_next_cluster_number() { + if ! 
kind get clusters 2>&1 | grep "^kind" > /dev/null; then + echo 1 + else + highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-) + echo $((highest_num + 1)) + fi +} + +if [ -f /.dockerenv ]; then +myip=$HOST_IP +container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2) +docker network connect "kind" $container || true +number=$(get_next_cluster_number) +twodigits=$(printf "%02d\n" $number) +fi + +reg_name='kind-registry' +reg_port='5000' +docker start "${reg_name}" 2>/dev/null || \ +docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2 + +cache_port='5000' +cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \ +docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2 +done +mkdir -p /tmp/oidc + +cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA +1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL +395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw +zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm +5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8 +2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9 +ywIDAQAB +-----END PUBLIC KEY----- +EOF + +cat <<'EOF' >/tmp/oidc/sa-signer.key +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ ++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui +PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6 ++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+ +odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5 +f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG +el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY +FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh +SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc +r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv +z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn +7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy +3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8 +PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy +72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw +BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo +hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn +WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+ +y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI +KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39 +0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR +f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN +b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc +Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd +qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q== +-----END RSA PRIVATE KEY----- +EOF + +echo Contents of kind${number}.yaml +cat << EOF | tee kind${number}.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + image: ${kindest_node} + extraPortMappings: + - containerPort: 6443 + hostPort: 70${twodigits} + extraMounts: + - containerPath: /etc/kubernetes/oidc + hostPath: /tmp/oidc + labels: + 
ingress-ready: true + topology.kubernetes.io/region: ${region} + topology.kubernetes.io/zone: ${zone} +networking: + disableDefaultCNI: true + serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16" + podSubnet: "10.1${twodigits}.0.0/16" +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + apiServer: + extraArgs: + service-account-key-file: /etc/kubernetes/pki/sa.pub + service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub + service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key + service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com + api-audiences: sts.amazonaws.com + extraVolumes: + - name: oidc + hostPath: /etc/kubernetes/oidc + mountPath: /etc/kubernetes/oidc + readOnly: true + metadata: + name: config +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] + endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] + endpoint = ["http://docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"] + endpoint = ["http://us-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"] + endpoint = ["http://us-central1-docker:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"] + endpoint = ["http://quay:${cache_port}"] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"] + endpoint = ["http://gcr:${cache_port}"] +EOF +echo ----------------------------------------------------- + +kind create cluster --name kind${number} --config kind${number}.yaml +ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress') +networkkind=$(echo ${ipkind} | awk -F. 
'{ print $1"."$2 }') +kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true + +# Preload images +cat << EOF >> images.txt +quay.io/metallb/controller:v0.13.12 +quay.io/metallb/speaker:v0.13.12 +EOF +cat images.txt | while read image; do + docker pull $image || true + kind load docker-image $image --name kind${number} || true +done + +docker network connect "kind" "${reg_name}" || true +docker network connect "kind" docker || true +docker network connect "kind" us-docker || true +docker network connect "kind" us-central1-docker || true +docker network connect "kind" quay || true +docker network connect "kind" gcr || true +# Calico for ipv4 +curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f - + +for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done +kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true + +cat << EOF | tee metallb${number}.yaml +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254 +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: empty + namespace: metallb-system +EOF + +printf "Create IPAddressPool in kind-kind${number}\n" +for i in {1..10}; do +kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break +sleep 2 +done + +# connect the registry to the cluster network if not already connected +printf "Renaming context kind-kind${number} to ${name}\n" +for i in {1..100}; do + (kubectl config get-contexts -oname | grep ${name}) && break + kubectl config rename-context kind-kind${number} ${name} && break + printf " $i"/100 + sleep 2 + [ $i -lt 100 ] || exit 1 +done + +# Document the local registry +# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry +cat <