|
#!/usr/bin/env bash
# Provision a multi-node kind (Kubernetes-in-Docker) cluster with a local
# container registry, for ipv4, ipv6 or dual-stack setups.
#
# Usage: setup-kind.sh [ARCH]   (ARCH defaults to "amd64")
#
# NOTE(review): we keep `set -xe` (trace + exit-on-error) and deliberately do
# not add `pipefail`: the `... | head -1` pipeline below would fail via SIGPIPE.
set -xe

# Start from a clean kubeconfig so stale cluster contexts cannot interfere.
rm -rf ~/.kube

KIND_VERSION=${KIND_VERSION:-"v0.25.0"}
# To properly upgrade Kind version check the releases in github https://github.com/kubernetes-sigs/kind/releases and use proper image based on Kind version
KIND_NODE_IMAGE=${KIND_NODE_IMAGE:-"kindest/node:v1.26.15@sha256:c79602a44b4056d7e48dc20f7504350f1e87530fe953428b792def00bc1076dd"}
COPY_DOCKER_LOGIN=${COPY_DOCKER_LOGIN:-"false"}

KIND_CLUSTER_NAME="kind-cluster"

# note that IPv6 is only supported on kind (i.e., minikube does not support it). Also we assume that when you set this flag
# to true then you meet requirements (i.) net.ipv6.conf.all.disable_ipv6 = 0 (ii. you have installed CNI supporting IPv6)
IP_FAMILY=${IP_FAMILY:-"ipv4"}

# Target CPU architecture for the kubectl/kind binaries; ${1:-...} replaces the
# original ARCH=$1 + `if [ -z ... ]` default dance with the idiomatic form.
ARCH=${1:-"amd64"}
| 21 | + |
# Install kubectl into /usr/local/bin.
# Globals: TEST_KUBECTL_VERSION - version to install ("latest" resolves via stable.txt)
#          ARCH                 - target architecture (amd64, arm64, ...)
# Note: `curl -f` makes the download fail on HTTP errors; without it an HTML
# error page would be saved and chmod +x'ed as the kubectl binary.
function install_kubectl {
    if [ "${TEST_KUBECTL_VERSION:-latest}" = "latest" ]; then
        TEST_KUBECTL_VERSION=$(curl -fsL https://storage.googleapis.com/kubernetes-release/release/stable.txt)
    fi
    curl -fLo kubectl "https://storage.googleapis.com/kubernetes-release/release/${TEST_KUBECTL_VERSION}/bin/linux/${ARCH}/kubectl" && chmod +x kubectl
    sudo cp kubectl /usr/local/bin
}
| 29 | + |
# Label every cluster node with rack-key=zone (consumed by rack-awareness tests).
# Node names contain no whitespace, so iterating the command substitution is safe.
function label_node {
    # It should work for all clusters
    local node_name
    for node_name in $(kubectl get nodes -o custom-columns=:.metadata.name --no-headers); do
        echo "${node_name}"
        kubectl label node "${node_name}" rack-key=zone
    done
}
| 38 | + |
# Download the kind binary for KIND_VERSION/ARCH and install it into /usr/local/bin.
# Globals: KIND_VERSION - kind release tag (e.g. v0.25.0)
#          ARCH         - target architecture (amd64, arm64, ...)
function install_kubernetes_provisioner {
    local kind_url="https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-${ARCH}"

    # -f: fail on HTTP errors instead of saving the error body as the binary.
    curl -fLo kind "${kind_url}" && chmod +x kind
    sudo cp kind /usr/local/bin
}
| 46 | + |
# Grant cluster-admin rights to the kube-system default service account
# via a ClusterRoleBinding named add-on-cluster-admin.
function create_cluster_role_binding_admin {
    kubectl create clusterrolebinding add-on-cluster-admin \
        --clusterrole=cluster-admin \
        --serviceaccount=kube-system:default
}
| 50 | + |
: '
@brief: Set up Kubernetes configuration directory and file.
@note: Ensures $HOME/.kube directory and $HOME/.kube/config file exist.
'
function setup_kube_directory {
    # mkdir -p is idempotent, so the previous `|| true` workaround (which also
    # masked real failures such as EACCES) is no longer needed; quoting protects
    # against a $HOME containing spaces.
    mkdir -p "$HOME/.kube"
    touch "$HOME/.kube/config"
}
| 59 | + |
: '
@brief: Add Docker Hub credentials to every kind cluster node.
@global: COPY_DOCKER_LOGIN - credentials are copied only when this equals "true".
@global: KIND_CLUSTER_NAME - name of the kind cluster whose nodes are updated.
@note: Reads the hosts $HOME/.docker/config.json.
'
function add_docker_hub_credentials_to_kubernetes {
    # Nothing to do unless the caller explicitly opted in.
    [ "$COPY_DOCKER_LOGIN" = "true" ] || return 0

    local kind_node
    for kind_node in $(kind get nodes --name "${KIND_CLUSTER_NAME}"); do
        # The external load balancer container runs no kubelet - skip it.
        case "$kind_node" in
            *external-load-balancer*) continue ;;
        esac
        # `kind get nodes` may emit kind/name (so node/name); keep only the name.
        local node_name=${kind_node#node/}
        # Place the registry credentials where kubelet looks for them ...
        docker cp "$HOME/.docker/config.json" "${node_name}:/var/lib/kubelet/config.json"
        # ... and restart kubelet so it re-reads the credential file.
        docker exec "${node_name}" systemctl restart kubelet.service
    done
}
| 83 | + |
: '
@brief: Update Docker daemon configuration and restart service.
@param $1: JSON string for Docker daemon configuration.
@note: Requires sudo permissions.
'
function updateDockerDaemonConfiguration() {
    # We need to add such host to insecure-registry (as localhost is default)
    # printf with a quoted "$1" writes the JSON verbatim; the original unquoted
    # `echo $1` word-split the payload (collapsing whitespace) and exposed it
    # to pathname expansion.
    printf '%s\n' "$1" | sudo tee /etc/docker/daemon.json
    # we need to restart docker service to propagate configuration
    sudo systemctl restart docker
}
| 95 | + |
: '
@brief: Increases the inotify user watches and user instances limits on a Linux system.
@param: None.
@global: None.
@note: Inotify is a Linux subsystem used for file system event notifications. This function
       helps adjust the limits for applications or services that monitor a large number
       of files or directories.
       This is specifically needed for multi-node control plane cluster
       https://github.com/kubernetes-sigs/kind/issues/2744#issuecomment-1127808069
'
function adjust_inotify_limits {
    # Persist each setting exactly once: the original unconditional `tee -a`
    # appended duplicate lines to /etc/sysctl.conf on every re-run.
    local setting
    for setting in "fs.inotify.max_user_watches=655360" "fs.inotify.max_user_instances=1280"; do
        echo "Setting ${setting%%=*} to ${setting#*=}..."
        # -x: whole-line match, -F: literal string (the value contains no regex intent)
        grep -qxF "${setting}" /etc/sysctl.conf || echo "${setting}" | sudo tee -a /etc/sysctl.conf
    done

    # Reload the system configuration settings
    echo "Reloading sysctl settings..."
    sudo sysctl -p

    echo "Inotify limits adjusted successfully."
}
| 121 | + |
# --- main ---
# Order matters: kubeconfig dir must exist before kubectl/kind are used, and
# inotify limits must be raised before the multi-control-plane cluster starts.
setup_kube_directory
install_kubectl
install_kubernetes_provisioner
adjust_inotify_limits

# Local container registry name/port, shared by both IP-family branches below.
reg_name='kind-registry'
reg_port='5001'
| 129 | + |
# Create the kind cluster and local registry for the selected IP family.
if [[ "$IP_FAMILY" = "ipv4" || "$IP_FAMILY" = "dual" ]]; then
    # First non-loopback IPv4 address of this host; used as the registry address.
    hostname=$(hostname --ip-address | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' | awk '$1 != "127.0.0.1" { print $1 }' | head -1)

    # update insecure registries
    updateDockerDaemonConfiguration "{ \"insecure-registries\" : [\"${hostname}:${reg_port}\"] }"

    # Create kind cluster with containerd registry config dir enabled
    # TODO: kind will eventually enable this by default and this patch will
    # be unnecessary.
    #
    # See:
    # https://github.com/kubernetes-sigs/kind/issues/2875
    # https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
    # See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
    cat <<EOF | kind create cluster --image "${KIND_NODE_IMAGE}" --config=-
    kind: Cluster
    apiVersion: kind.x-k8s.io/v1alpha4
    nodes:
    - role: control-plane
    - role: control-plane
    - role: control-plane
    - role: worker
    - role: worker
    - role: worker
    name: $KIND_CLUSTER_NAME
    containerdConfigPatches:
    - |-
      [plugins."io.containerd.grpc.v1.cri".registry]
        config_path = "/etc/containerd/certs.d"
    networking:
      ipFamily: $IP_FAMILY
EOF
    # run local container registry
    if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
        docker run \
          -d --restart=always -p "${hostname}:${reg_port}:5000" --name "${reg_name}" \
          registry:2
    fi

    # Add the registry config to the nodes
    #
    # This is necessary because localhost resolves to loopback addresses that are
    # network-namespace local.
    # In other words: localhost in the container is not localhost on the host.
    #
    # We want a consistent name that works from both ends, so we tell containerd to
    # alias localhost:${reg_port} to the registry container when pulling images
    # note: kind get nodes (default name `kind` and with specifying new name we have to use --name <cluster-name>
    # See https://kind.sigs.k8s.io/docs/user/local-registry/
    REGISTRY_DIR="/etc/containerd/certs.d/${hostname}:${reg_port}"

    for node in $(kind get nodes --name "${KIND_CLUSTER_NAME}"); do
        echo "Executing command in node:${node}"
        docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
        cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
    [host."http://${reg_name}:5000"]
EOF
    done

elif [[ "$IP_FAMILY" = "ipv6" ]]; then
    # for ipv6 configuration
    ula_fixed_ipv6="fd01:2345:6789"
    registry_dns="myregistry.local"

    # manually assign an IPv6 address to eth0 interface
    sudo ip -6 addr add "${ula_fixed_ipv6}"::1/64 dev eth0

    # use ULA (i.e., Unique Local Address), which offers a similar "private" scope as link-local
    # but without the interface dependency and some of the other challenges of link-local addresses.
    # (link-local starts as fe80::) but we will use ULA fd01
    updateDockerDaemonConfiguration "{
        \"insecure-registries\" : [\"[${ula_fixed_ipv6}::1]:${reg_port}\", \"${registry_dns}:${reg_port}\"],
        \"experimental\": true,
        \"ip6tables\": true,
        \"fixed-cidr-v6\": \"${ula_fixed_ipv6}::/80\"
    }"

    cat <<EOF | kind create cluster --image "${KIND_NODE_IMAGE}" --config=-
    kind: Cluster
    apiVersion: kind.x-k8s.io/v1alpha4
    nodes:
    - role: control-plane
    - role: control-plane
    - role: control-plane
    - role: worker
    - role: worker
    - role: worker
    name: $KIND_CLUSTER_NAME
    containerdConfigPatches:
    - |-
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."myregistry.local:5001"]
        endpoint = ["http://myregistry.local:5001"]
    networking:
      ipFamily: $IP_FAMILY
EOF
    # run local container registry
    if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
        docker run \
          -d --restart=always -p "[${ula_fixed_ipv6}::1]:${reg_port}:5000" --name "${reg_name}" \
          registry:2
    fi
    # we need to also make a DNS record for docker tag because it seems that such version does not support []:: format
    # FIX: append via `sudo tee -a` - the original plain `>>` redirection was performed
    # by this (unprivileged) shell and fails on the root-owned /etc/hosts.
    echo "${ula_fixed_ipv6}::1 ${registry_dns}" | sudo tee -a /etc/hosts

    # note: kind get nodes (default name `kind` and with specifying new name we have to use --name <cluster-name>
    # See https://kind.sigs.k8s.io/docs/user/local-registry/
    for node in $(kind get nodes --name "${KIND_CLUSTER_NAME}"); do
        echo "Executing command in node:${node}"
        # FIX: append the record inside the node instead of `cp /dev/stdin /etc/hosts`,
        # which replaced the node's whole /etc/hosts (dropping localhost/node entries).
        cat <<EOF | docker exec -i "${node}" sh -c 'cat >> /etc/hosts'
${ula_fixed_ipv6}::1 ${registry_dns}
EOF
    done
fi
| 243 | + |
# Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
# (docker inspect prints `null` for the kind network when no connection exists yet).
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
    docker network connect "kind" "${reg_name}"
fi

# Copies Docker Hub credentials onto the nodes; no-op unless COPY_DOCKER_LOGIN=true.
add_docker_hub_credentials_to_kubernetes

create_cluster_role_binding_admin
label_node
0 commit comments