33set -e
44
55source lib/common.sh
6+ source lib/capi.sh
67
78if [ -z " $1 " ] || [ -z " $2 " ]
89 then
@@ -17,6 +18,10 @@ HELM_VALUE_K8S_ADDONS="--set cni.calico.enabled=true"
1718IS_MANAGED_CLUSTER=" false"
1819SCRIPT_DIR=$( cd -- " $( dirname -- " ${BASH_SOURCE[0]} " ) " & > /dev/null && pwd )
1920
# Locate the bundled yq release under $ASSET_DIR/yq (the single versioned
# subdirectory, e.g. "v4.30.5") and make the linux binary executable.
# NOTE(review): assumes exactly one entry matching "v" exists under
# $ASSET_DIR/yq — confirm the asset layout; consider a glob instead of ls|grep.
YQ_ASSETS_DIR="$ASSET_DIR/yq/$(ls "$ASSET_DIR/yq" | grep v)"
YQ_PATH="$YQ_ASSETS_DIR/yq_linux_amd64"
chmod +x "$YQ_PATH"
24+
2025export KUBECONFIG=~ /.kube/config
2126
2227log_info " Creating TKS Admin Cluster via Cluster API"
8388 else
8489 clusterctl get kubeconfig $CLUSTER_NAME > output/kubeconfig_$CLUSTER_NAME
8590 chmod 600 output/kubeconfig_$CLUSTER_NAME
91+ export KUBECONFIG=output/kubeconfig_$CLUSTER_NAME
8692 helm upgrade -i k8s-addons $ASSET_DIR /taco-helm/kubernetes-addons $HELM_VALUE_K8S_ADDONS
8793 helm upgrade -i aws-ebs-csi-driver --namespace kube-system $ASSET_DIR /aws-ebs-csi-driver-helm/aws-ebs-csi-driver
8894 fi
9399 chmod 600 output/kubeconfig_$CLUSTER_NAME
94100 export KUBECONFIG=output/kubeconfig_$CLUSTER_NAME
95101 kubectl apply -f $ASSET_DIR /calico/calico.yaml
96- helm upgrade -i local-path-provisioner --namespace kube-system --set storageClass.name=taco-storage $ASSET_DIR /local-path-provisioner/deploy/chart/local-path-provisioner
102+ helm upgrade -i local-path-provisioner --namespace kube-system --set storageClass.name=taco-storage --set image.repository= ${BOOTSTRAP_CLUSTER_SERVER_IP} :5000/local-path-provisioner $ASSET_DIR /local-path-provisioner/deploy/chart/local-path-provisioner
97103 ;;
98104esac
99105
@@ -102,66 +108,48 @@ for node in $(kubectl get no -o jsonpath='{.items[*].metadata.name}');do
102108 kubectl wait --for=condition=Ready no/$node
103109done
104110echo " -----"
# On BYOH the interim node carries the control-plane taint; remove it so
# workloads can schedule there. "|| true" keeps the loop going when the
# taint is already absent (kubectl taint exits non-zero in that case).
case "$TKS_ADMIN_CLUSTER_INFRA_PROVIDER" in
"byoh")
  log_info "remove taint from the interim node in BYOH"
  for no in $(kubectl get no -o name); do
    # strip the "node/" prefix; NOTE(review): BYOH_TMP_NODE looks unused in
    # this loop — presumably read later in the script, confirm before removing.
    BYOH_TMP_NODE=${no#*/}
    kubectl taint nodes "$no" node-role.kubernetes.io/control-plane:NoSchedule- || true
  done
  ;;
esac
120+
105121kubectl get no
106122log_info " Make sure all node status are ready"
107123
108- install_capi_to_admin_cluster () {
109- export KUBECONFIG=output/kubeconfig_$CLUSTER_NAME
110- log_info " Initializing cluster API provider components in TKS admin cluster"
111- for provider in ${CAPI_INFRA_PROVIDERS[@]} ; do
112- case $provider in
113- " aws" )
114- export AWS_REGION
115- export AWS_ACCESS_KEY_ID
116- export AWS_SECRET_ACCESS_KEY
117-
118- export AWS_B64ENCODED_CREDENTIALS=$( clusterawsadm bootstrap credentials encode-as-profile)
119- export EXP_MACHINE_POOL=true
120- export CAPA_EKS_IAM=true
121- export CAPA_EKS_ADD_ROLES=true
122-
123- CAPI_PROVIDER_NS=capa-system
124- ;;
125- " byoh" )
126- CAPI_PROVIDER_NS=byoh-system
127- ;;
128- esac
129- done
130-
131- case $TKS_ADMIN_CLUSTER_INFRA_PROVIDER in
132- " byoh" )
133- for no in $( kubectl get no -o name) ; do
134- BYOH_TMP_NODE=${no#*/ }
135- kubectl taint nodes $no node-role.kubernetes.io/control-plane:NoSchedule- || true
136- done
137- ;;
138- esac
139- gum spin --spinner dot --title " Waiting for providers to be installed..." -- clusterctl init --infrastructure $( printf -v joined ' %s,' " ${CAPI_INFRA_PROVIDERS[@]} " ; echo " ${joined% ,} " ) --wait-providers
140- }
141-
142- install_capi_to_admin_cluster
124+ log_info " Initializing cluster API provider components in TKS admin cluster"
125+ prepare_capi_providers admin ${BOOTSTRAP_CLUSTER_SERVER_IP} :5000
126+ install_capi_providers admin output/kubeconfig_$CLUSTER_NAME
143127
#######################################
# Copy BYOH resources of a given kind from a source cluster to the admin
# cluster, stripping server-generated fields so manifests re-apply cleanly.
# Arguments:
#   $1 - resource kind (e.g. "byoh", "k8sinstallerconfigtemplates")
#   $2 - kubeconfig path of the source cluster
#   $3 - kubeconfig path of the destination (admin) cluster
# Outputs:
#   writes sanitized manifests to output/<kind>-<name>.yaml, then applies them
#######################################
move_byoh_resources () {
  local rc_kind=$1
  local src_k8s_kubeconfig=$2
  local dst_k8s_kubeconfig=$3

  log_info "Move $rc_kind BYOH resources to the admin cluster"

  for rc in $(kubectl get --kubeconfig "$src_k8s_kubeconfig" "$rc_kind" -o name); do
    # "kind/name" -> "name"
    local rc_file=${rc#*/}
    # Drop uid/resourceVersion/creationTimestamp/generation: server-owned
    # metadata that would conflict when re-applied on the destination.
    kubectl get --kubeconfig "$src_k8s_kubeconfig" "$rc" -o yaml \
      | grep -Ev 'uid|resourceVersion|creationTimestamp|generation' \
        > "output/${rc_kind}-${rc_file}.yaml"
    kubectl apply --kubeconfig "$dst_k8s_kubeconfig" -f "output/${rc_kind}-${rc_file}.yaml"
  done
}
156141
157142if [ $TKS_ADMIN_CLUSTER_INFRA_PROVIDER == " byoh" ]; then
158- gum spin --spinner dot --title " Deleting BYOH infra provider for a moment..." -- clusterctl delete --infrastructure byoh
143+ log_info " Deleting BYOH infra provider for a moment..."
144+ kubectl delete -f output/admin-byoh-infra.yaml
145+ # # XXX: check no pod in byoh-system
159146
160- move_byoh_resources byoh
161- move_byoh_resources k8sinstallerconfigtemplates
147+ move_byoh_resources byoh ~ /.kube/config output/kubeconfig_ $CLUSTER_NAME
148+ move_byoh_resources k8sinstallerconfigtemplates ~ /.kube/config output/kubeconfig_ $CLUSTER_NAME
162149
163- export KUBECONFIG=output/kubeconfig_$CLUSTER_NAME
164- gum spin --spinner dot --title " Reinstalling BYOH infra provider..." -- clusterctl init --infrastructure $( printf -v joined ' %s,' " ${CAPI_INFRA_PROVIDERS[@]} " ; echo " ${joined% ,} " ) --wait-providers
150+ kubectl apply -f output/admin-byoh-infra.yaml
151+ sleep 10
152+ ./util/wait_for_all_pods_in_ns.sh byoh-system
165153
166154 kubectl patch deploy -n byoh-system byoh-controller-manager --type=' json' -p=' [{"op": "replace", "path": "/spec/template/spec/containers/0/resources/limits/memory", "value":"1000Mi"}]'
167155 kubectl patch deploy -n byoh-system byoh-controller-manager --type=' json' -p=' [{"op": "replace", "path": "/spec/template/spec/containers/0/resources/limits/cpu", "value":"1000m"}]'
0 commit comments