diff --git a/pkg/liqoctl/test/network/setup/namespace.go b/pkg/liqoctl/test/network/setup/namespace.go index 406612286f..5ad703d5c5 100644 --- a/pkg/liqoctl/test/network/setup/namespace.go +++ b/pkg/liqoctl/test/network/setup/namespace.go @@ -74,7 +74,7 @@ func RemoveNamespace(ctx context.Context, cl *client.Client) error { if err := cl.Consumer.Delete(ctx, ns); err != nil { return err } - timeout, cancel := context.WithTimeout(ctx, 1*time.Minute) + timeout, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() if err := wait.PollUntilContextCancel(timeout, 1*time.Second, true, func(ctx context.Context) (done bool, err error) { if err := cl.Consumer.Get(ctx, ctrlclient.ObjectKeyFromObject(ns), ns); err != nil { diff --git a/test/e2e/cruise/apiserverinteraction/apiserver_interaction_test.go b/test/e2e/cruise/apiserverinteraction/apiserver_interaction_test.go index 5dea002990..5a58791b9d 100644 --- a/test/e2e/cruise/apiserverinteraction/apiserver_interaction_test.go +++ b/test/e2e/cruise/apiserverinteraction/apiserver_interaction_test.go @@ -97,6 +97,11 @@ var _ = Describe("Liqo E2E", func() { _, err := util.EnforceNamespace(ctx, testContext.Clusters[0].NativeClient, testContext.Clusters[0].Cluster, namespaceName) Expect(err).ToNot(HaveOccurred()) + By("Ensuring virtual nodes are ready") + Eventually(func() bool { + return util.CheckVirtualNodes(ctx, testContext.Clusters[0].NativeClient, testContext.ClustersNumber) + }, timeout, interval).Should(BeTrue()) + Expect(util.OffloadNamespace(testContext.Clusters[0].KubeconfigPath, namespaceName, "--pod-offloading-strategy", "Remote")).To(Succeed()) time.Sleep(2 * time.Second) diff --git a/test/e2e/pipeline/infra/kubeadm/pre-requirements.sh b/test/e2e/pipeline/infra/kubeadm/pre-requirements.sh index 1a815389e7..509fceb8c5 100755 --- a/test/e2e/pipeline/infra/kubeadm/pre-requirements.sh +++ b/test/e2e/pipeline/infra/kubeadm/pre-requirements.sh @@ -18,9 +18,9 @@ # POD_CIDR_OVERLAPPING -> the pod CIDR of the 
clusters is overlapping # CLUSTER_TEMPLATE_FILE -> the file where the cluster template is stored -set -e # Fail in case of error -set -o nounset # Fail if undefined variables are used -set -o pipefail # Fail if one of the piped commands fails +set -e # Fail in case of error +set -o nounset # Fail if undefined variables are used +set -o pipefail # Fail if one of the piped commands fails error() { local sourcefile=$1 @@ -43,3 +43,5 @@ install_kubectl "${OS}" "${ARCH}" "${K8S_VERSION}" install_helm "${OS}" "${ARCH}" install_clusterctl "${OS}" "${ARCH}" + +generate_kubeconfig diff --git a/test/e2e/pipeline/infra/kubeadm/setup.sh b/test/e2e/pipeline/infra/kubeadm/setup.sh index 7c84ad75f3..ae429d249d 100755 --- a/test/e2e/pipeline/infra/kubeadm/setup.sh +++ b/test/e2e/pipeline/infra/kubeadm/setup.sh @@ -19,14 +19,14 @@ # CLUSTER_TEMPLATE_FILE -> the file where the cluster template is stored # CNI -> the CNI plugin used -set -e # Fail in case of error -set -o nounset # Fail if undefined variables are used -set -o pipefail # Fail if one of the piped commands fails +set -e # Fail in case of error +set -o nounset # Fail if undefined variables are used +set -o pipefail # Fail if one of the piped commands fails error() { - local sourcefile=$1 - local lineno=$2 - echo "An error occurred at $sourcefile:$lineno." + local sourcefile=$1 + local lineno=$2 + echo "An error occurred at $sourcefile:$lineno." 
} trap 'error "${BASH_SOURCE}" "${LINENO}"' ERR @@ -38,7 +38,7 @@ WORKDIR=$(dirname "$FILEPATH") source "$WORKDIR/../../utils.sh" # shellcheck disable=SC1091 -# shellcheck source=../cni.sh +# shellcheck source=../cni.sh source "$WORKDIR/../cni.sh" export K8S_VERSION=${K8S_VERSION:-"1.29.7"} @@ -56,17 +56,17 @@ export POD_CIDR_OVERLAPPING=${POD_CIDR_OVERLAPPING:-"false"} TARGET_NAMESPACE="liqo-ci" -for i in $(seq 1 "${CLUSTER_NUMBER}"); -do +for i in $(seq 1 "${CLUSTER_NUMBER}"); do CAPI_CLUSTER_NAME=$(forge_clustername "${i}") - if [[ ${POD_CIDR_OVERLAPPING} != "true" ]]; then - # this should avoid the ipam to reserve a pod CIDR of another cluster as local external CIDR causing remapping - export POD_CIDR="10.$((i * 10)).0.0/16" - fi - echo "Creating cluster ${CAPI_CLUSTER_NAME}" + if [[ ${POD_CIDR_OVERLAPPING} != "true" ]]; then + # this should avoid the ipam to reserve a pod CIDR of another cluster as local external CIDR causing remapping + export POD_CIDR="10.$((i * 10)).0.0/16" + fi + echo "Creating cluster ${CAPI_CLUSTER_NAME}" POD_CIDR_ESC_1=$(echo $POD_CIDR | cut -d'/' -f1) POD_CIDR_ESC_2=$(echo $POD_CIDR | cut -d'/' -f2) POD_CIDR_ESC="${POD_CIDR_ESC_1}\/${POD_CIDR_ESC_2}" + clusterctl generate cluster "${CAPI_CLUSTER_NAME}" \ --kubernetes-version "$K8S_VERSION" \ --control-plane-machine-count 1 \ @@ -75,19 +75,18 @@ do --infrastructure kubevirt | sed "s/10.243.0.0\/16/$POD_CIDR_ESC/g" | ${KUBECTL} apply -f - done -for i in $(seq 1 "${CLUSTER_NUMBER}"); -do +for i in $(seq 1 "${CLUSTER_NUMBER}"); do CAPI_CLUSTER_NAME=$(forge_clustername "${i}") if [[ ${POD_CIDR_OVERLAPPING} != "true" ]]; then - # this should avoid the ipam to reserve a pod CIDR of another cluster as local external CIDR causing remapping - export POD_CIDR="10.$((i * 10)).0.0/16" - fi + # this should avoid the ipam to reserve a pod CIDR of another cluster as local external CIDR causing remapping + export POD_CIDR="10.$((i * 10)).0.0/16" + fi echo "Waiting for cluster ${CAPI_CLUSTER_NAME} to be 
ready" "${KUBECTL}" wait --for condition=Ready=true -n "$TARGET_NAMESPACE" "clusters.cluster.x-k8s.io/${CAPI_CLUSTER_NAME}" --timeout=-1s echo "Getting kubeconfig for cluster ${CAPI_CLUSTER_NAME}" mkdir -p "${TMPDIR}/kubeconfigs" - clusterctl get kubeconfig -n "$TARGET_NAMESPACE" "${CAPI_CLUSTER_NAME}" > "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" + clusterctl get kubeconfig -n "$TARGET_NAMESPACE" "${CAPI_CLUSTER_NAME}" >"${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" CURRENT_CONTEXT=$("${KUBECTL}" config current-context --kubeconfig "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}") "${KUBECTL}" config set contexts."${CURRENT_CONTEXT}".namespace default --kubeconfig "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" @@ -102,8 +101,7 @@ do install_metrics_server "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" done -for i in $(seq 1 "${CLUSTER_NUMBER}"); -do +for i in $(seq 1 "${CLUSTER_NUMBER}"); do echo "Waiting for cluster ${CAPI_CLUSTER_NAME} CNI to be ready" "wait_${CNI}" "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" done diff --git a/test/e2e/pipeline/utils.sh b/test/e2e/pipeline/utils.sh index d86043b449..30d6b4baa2 100755 --- a/test/e2e/pipeline/utils.sh +++ b/test/e2e/pipeline/utils.sh @@ -36,29 +36,34 @@ waitandretry() { fi } -function setup_arch_and_os(){ +function setup_arch_and_os() { ARCH=$(uname -m) case $ARCH in - armv5*) ARCH="armv5";; - armv6*) ARCH="armv6";; - armv7*) ARCH="arm";; - aarch64) ARCH="arm64";; - x86) ARCH="386";; - x86_64) ARCH="amd64";; - i686) ARCH="386";; - i386) ARCH="386";; - *) echo "Error architecture '${ARCH}' unknown"; exit 1 ;; + armv5*) ARCH="armv5" ;; + armv6*) ARCH="armv6" ;; + armv7*) ARCH="arm" ;; + aarch64) ARCH="arm64" ;; + x86) ARCH="386" ;; + x86_64) ARCH="amd64" ;; + i686) ARCH="386" ;; + i386) ARCH="386" ;; + *) + echo "Error architecture '${ARCH}' unknown" + exit 1 + ;; esac - OS=$(uname |tr '[:upper:]' '[:lower:]') + OS=$(uname | tr '[:upper:]' '[:lower:]') case "$OS" in # Minimalist GNU for Windows - "mingw"*) OS='windows'; return ;; + 
"mingw"*) + OS='windows' + return + ;; esac } - -function check_supported_arch_and_os(){ +function check_supported_arch_and_os() { local supported=$1 local os=$2 local arch=$3 @@ -89,8 +94,7 @@ function install_kubectl() { version=$(curl -L -s https://dl.k8s.io/release/stable.txt) fi - if ! command -v "${KUBECTL}" &> /dev/null - then + if ! command -v "${KUBECTL}" &>/dev/null; then echo "WARNING: kubectl could not be found. Downloading and installing it locally..." echo "Downloading https://dl.k8s.io/release/${version}/bin/${os}/${arch}/kubectl" if ! curl --fail -Lo "${KUBECTL}" "https://dl.k8s.io/release/${version}/bin/${os}/${arch}/kubectl"; then @@ -114,8 +118,7 @@ function install_helm() { HELM_VERSION="v3.15.3" - if ! command -v "${HELM}" &> /dev/null - then + if ! command -v "${HELM}" &>/dev/null; then echo "WARNING: helm could not be found. Downloading and installing it locally..." if ! curl --fail -Lo "./helm-${HELM_VERSION}-${os}-${arch}.tar.gz" "https://get.helm.sh/helm-${HELM_VERSION}-${os}-${arch}.tar.gz"; then echo "Error: Unable to download helm for '${os}-${arch}'" @@ -159,7 +162,7 @@ function install_gcloud() { cd - #Login to gcloud - echo "${GCLOUD_KEY}" | base64 -d > "${BINDIR}/gke_key_file.json" + echo "${GCLOUD_KEY}" | base64 -d >"${BINDIR}/gke_key_file.json" "${GCLOUD}" auth activate-service-account --key-file="${BINDIR}/gke_key_file.json" "${GCLOUD}" components install gke-gcloud-auth-plugin } @@ -167,19 +170,16 @@ function install_gcloud() { function install_az() { local os=$1 - if ! command -v az &> /dev/null - then - echo "Azure CLI could not be found. Downloading and installing..." - if [[ "${os}" == "linux" ]] - then - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash - elif [[ "${os}" == "darwin" ]] - then - brew update && brew install azure-cli - else - echo "Error: Azure CLI is not supported on ${os}" - exit 1 - fi + if ! command -v az &>/dev/null; then + echo "Azure CLI could not be found. Downloading and installing..." 
+ if [[ "${os}" == "linux" ]]; then + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + elif [[ "${os}" == "darwin" ]]; then + brew update && brew install azure-cli + else + echo "Error: Azure CLI is not supported on ${os}" + exit 1 + fi fi echo "Azure CLI version:" @@ -206,8 +206,7 @@ function wait_kyverno() { local kubeconfig=$1 # Wait for the kyverno deployments to be ready - if ! waitandretry 5s 2 "${KUBECTL} rollout status deployment -n kyverno --kubeconfig ${kubeconfig}" - then + if ! waitandretry 5s 2 "${KUBECTL} rollout status deployment -n kyverno --kubeconfig ${kubeconfig}"; then echo "Failed to wait for kyverno deployments to be ready" exit 1 fi @@ -221,3 +220,41 @@ function install_clusterctl() { sudo install -o root -g root -m 0755 clusterctl /usr/local/bin/clusterctl clusterctl version } + +function generate_kubeconfig() { + # Ensure .kube directory exists + mkdir -p "${HOME}/.kube" + + # Get service account details + SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + SA_CA_CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + + echo "SA_TOKEN: <redacted>" # never print the service-account token: it is a live credential and would leak into CI logs + echo "SA_CA_CERT: ${SA_CA_CERT}" + echo "NAMESPACE: ${NAMESPACE}" + + # Get the Kubernetes API server address + KUBERNETES_SERVICE_HOST=${KUBERNETES_SERVICE_HOST:-kubernetes.default.svc} + KUBERNETES_SERVICE_PORT=${KUBERNETES_SERVICE_PORT:-443} + + # Create kubeconfig + "${KUBECTL}" config set-cluster default-cluster \ + --server=https://"${KUBERNETES_SERVICE_HOST}":"${KUBERNETES_SERVICE_PORT}" \ + --certificate-authority="${SA_CA_CERT}" \ + --embed-certs=true \ + --kubeconfig="${HOME}/.kube/config" + + "${KUBECTL}" config set-credentials default-user \ + --token="${SA_TOKEN}" \ + --kubeconfig="${HOME}/.kube/config" + + "${KUBECTL}" config set-context default-context \ + --cluster=default-cluster \ + --user=default-user \ + --namespace="${NAMESPACE}" \ + --kubeconfig="${HOME}/.kube/config" + 
+ "${KUBECTL}" config use-context default-context \ + --kubeconfig="${HOME}/.kube/config" +} diff --git a/test/e2e/testutils/apiserver/create.go b/test/e2e/testutils/apiserver/create.go index 9f0748de67..5561c9a643 100644 --- a/test/e2e/testutils/apiserver/create.go +++ b/test/e2e/testutils/apiserver/create.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "os" + "strings" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -41,7 +42,7 @@ const ( ) var ( - image = "bitnami/kubectl" + image = "alpine/kubectl" ) func init() { @@ -61,7 +62,7 @@ func CreateKubectlJob(ctx context.Context, cl client.Client, namespace string, v Spec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: containerName, - Image: fmt.Sprintf("%s:%s.%s", image, v.Major, v.Minor), + Image: fmt.Sprintf("%s:%s.%s.%s", image, v.Major, v.Minor, patchVersion(v)), Args: []string{"get", "pods", "-n", namespace, "--no-headers", "-o", "custom-columns=:.metadata.name"}, Resources: util.ResourceRequirements(), }}, @@ -113,3 +114,12 @@ func CreateRoleBinding(ctx context.Context, cl client.Client, namespace string) } return cl.Create(ctx, rb) } + +func patchVersion(v *version.Info) string { + tmp := strings.Split(v.GitVersion, ".") + if len(tmp) < 3 { + return "" + } + patch := tmp[2] + return strings.Split(patch, "+")[0] +}