|
| 1 | +//go:build e2e |
| 2 | +// +build e2e |
| 3 | + |
| 4 | +/* |
| 5 | +Copyright 2021 The Kubernetes Authors. |
| 6 | +
|
| 7 | +Licensed under the Apache License, Version 2.0 (the "License"); |
| 8 | +you may not use this file except in compliance with the License. |
| 9 | +You may obtain a copy of the License at |
| 10 | +
|
| 11 | + http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | +
|
| 13 | +Unless required by applicable law or agreed to in writing, software |
| 14 | +distributed under the License is distributed on an "AS IS" BASIS, |
| 15 | +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | +See the License for the specific language governing permissions and |
| 17 | +limitations under the License. |
| 18 | +*/ |
| 19 | + |
| 20 | +package e2e |
| 21 | + |
| 22 | +import ( |
| 23 | + "fmt" |
| 24 | + |
| 25 | + . "github.com/onsi/ginkgo/v2" |
| 26 | + . "github.com/onsi/gomega" |
| 27 | + . "github.com/onsi/gomega/gstruct" |
| 28 | + |
| 29 | + "k8s.io/utils/ptr" |
| 30 | + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" |
| 31 | + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" |
| 32 | + "sigs.k8s.io/cluster-api/test/framework" |
| 33 | + "sigs.k8s.io/cluster-api/util/patch" |
| 34 | + "sigs.k8s.io/controller-runtime/pkg/client" |
| 35 | +) |
| 36 | + |
const (
	// Label and annotation keys expected to be present in the Cluster's
	// topology control plane metadata (cluster.Spec.Topology.ControlPlane.Metadata);
	// the PostUpgrade hook below removes the label and rewrites the annotation
	// to verify in-place propagation to control plane machines.
	e2eOldLabelName      = "Cluster.topology.controlPlane.oldLabel"
	e2eOldAnnotationName = "Cluster.topology.controlPlane.oldAnnotation"
	// New value written to e2eOldAnnotationName after the provider upgrade;
	// each control plane Machine is expected to converge to it.
	e2eNewAnnotationValue = "newAnnotationValue"
	// Field-manager name the k3s control plane controller is expected to use;
	// its presence in a Machine's managedFields shows the upgraded controller
	// has reconciled (and SSA ownership is established) — TODO confirm against
	// the controller's apply options.
	kcpManagerName = "capi-kthreescontrolplane"
)
| 43 | + |
| 44 | +var ( |
| 45 | + clusterctlDownloadURL = "https://github.com/kubernetes-sigs/cluster-api/releases/download/v%s/clusterctl-{OS}-{ARCH}" |
| 46 | + providerCAPIPrefix = "cluster-api:v%s" |
| 47 | + providerKThreesPrefix = "k3s:v%s" |
| 48 | + providerDockerPrefix = "docker:v%s" |
| 49 | +) |
| 50 | + |
| 51 | +var _ = Describe("When testing clusterctl upgrades using ClusterClass (v0.2.0=>current) [ClusterClass]", func() { |
| 52 | + // Upgrade from v0.2.0 to current (current version is built from src). |
| 53 | + var ( |
| 54 | + specName = "clusterctl-upgrade" |
| 55 | + version = "0.2.0" |
| 56 | + k3sCapiUpgradedVersion string |
| 57 | + capiCoreVersion string |
| 58 | + capiCoreUpgradedVersion string |
| 59 | + ) |
| 60 | + BeforeEach(func() { |
| 61 | + Expect(e2eConfig.Variables).To(HaveKey(K3sCapiCurrentVersion)) |
| 62 | + Expect(e2eConfig.Variables).To(HaveKey(CapiCoreVersion)) |
| 63 | + |
| 64 | + // Will upgrade k3s CAPI from v0.2.0 to k3sCapiUpgradedVersion. |
| 65 | + k3sCapiUpgradedVersion = e2eConfig.GetVariable(K3sCapiCurrentVersion) |
| 66 | + |
| 67 | + // Will init other CAPI core/CAPD componenets with CapiCoreVersion, and then upgrade to CapiCoreUpgradedVersion. |
| 68 | + // For now, this two versions are equal. |
| 69 | + capiCoreVersion = e2eConfig.GetVariable(CapiCoreVersion) |
| 70 | + capiCoreUpgradedVersion = capiCoreVersion |
| 71 | + }) |
| 72 | + |
| 73 | + capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { |
| 74 | + return capi_e2e.ClusterctlUpgradeSpecInput{ |
| 75 | + E2EConfig: e2eConfig, |
| 76 | + ClusterctlConfigPath: clusterctlConfigPath, |
| 77 | + BootstrapClusterProxy: bootstrapClusterProxy, |
| 78 | + ArtifactFolder: artifactFolder, |
| 79 | + SkipCleanup: skipCleanup, |
| 80 | + InfrastructureProvider: ptr.To("docker"), |
| 81 | + InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, capiCoreVersion), |
| 82 | + InitWithCoreProvider: fmt.Sprintf(providerCAPIPrefix, capiCoreVersion), |
| 83 | + InitWithBootstrapProviders: []string{fmt.Sprintf(providerKThreesPrefix, version)}, |
| 84 | + InitWithControlPlaneProviders: []string{fmt.Sprintf(providerKThreesPrefix, version)}, |
| 85 | + InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, capiCoreVersion)}, |
| 86 | + InitWithProvidersContract: "v1beta1", |
| 87 | + // InitWithKubernetesVersion is for the management cluster, WorkloadKubernetesVersion is for the workload cluster. |
| 88 | + // Hardcoding the versions as later versions of k3s might not be compatible with the older versions of CAPI k3s. |
| 89 | + InitWithKubernetesVersion: "v1.30.0", |
| 90 | + WorkloadKubernetesVersion: "v1.30.2+k3s2", |
| 91 | + MgmtFlavor: "topology", |
| 92 | + WorkloadFlavor: "topology", |
| 93 | + UseKindForManagementCluster: true, |
| 94 | + // Configuration for the provider upgrades. |
| 95 | + Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{ |
| 96 | + { |
| 97 | + // CAPI core or CAPD with compatible version. |
| 98 | + CoreProvider: fmt.Sprintf(providerCAPIPrefix, capiCoreUpgradedVersion), |
| 99 | + InfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, capiCoreUpgradedVersion)}, |
| 100 | + // Upgrade to current k3s. |
| 101 | + BootstrapProviders: []string{fmt.Sprintf(providerKThreesPrefix, k3sCapiUpgradedVersion)}, |
| 102 | + ControlPlaneProviders: []string{fmt.Sprintf(providerKThreesPrefix, k3sCapiUpgradedVersion)}, |
| 103 | + }, |
| 104 | + }, |
| 105 | + // After the k3s CAPI upgrade, will test the inplace mutable fields |
| 106 | + // could be updated correctly. This is in complement to the |
| 107 | + // inplace_rollout_test to include the k3s CAPI upgrade scenario. |
| 108 | + // We are testing upgrading from v0.2.0 as we do not support SSA |
| 109 | + // before v0.2.0. |
| 110 | + PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) { |
| 111 | + clusterList := &clusterv1.ClusterList{} |
| 112 | + mgmtClient := managementClusterProxy.GetClient() |
| 113 | + |
| 114 | + if err := mgmtClient.List(ctx, clusterList, client.InNamespace(clusterNamespace)); err != nil { |
| 115 | + Expect(err).NotTo(HaveOccurred()) |
| 116 | + } |
| 117 | + Expect(clusterList.Items).To(HaveLen(1), fmt.Sprintf("Expected to have only one cluster in the namespace %s", clusterNamespace)) |
| 118 | + |
| 119 | + cluster := &clusterList.Items[0] |
| 120 | + |
| 121 | + Byf("Waiting the new controller to reconcile at least once, to set the managed fields with k3s kcpManagerName for all control plane machines.") |
| 122 | + Eventually(func(g Gomega) { |
| 123 | + controlPlaneMachineList := &clusterv1.MachineList{} |
| 124 | + g.Expect(mgmtClient.List(ctx, controlPlaneMachineList, client.InNamespace(clusterNamespace), client.MatchingLabels{ |
| 125 | + clusterv1.MachineControlPlaneLabel: "", |
| 126 | + clusterv1.ClusterNameLabel: cluster.Name, |
| 127 | + })).To(Succeed()) |
| 128 | + for _, m := range controlPlaneMachineList.Items { |
| 129 | + g.Expect(m.ObjectMeta.ManagedFields).To(ContainElement(MatchFields(IgnoreExtras, Fields{ |
| 130 | + "Manager": Equal(kcpManagerName), |
| 131 | + }))) |
| 132 | + } |
| 133 | + }, e2eConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed()) |
| 134 | + |
| 135 | + Byf("Modifying the control plane label and annotations of Cluster %s", cluster.Name) |
| 136 | + topologyControlPlane := cluster.Spec.Topology.ControlPlane |
| 137 | + Expect(topologyControlPlane.Metadata.Labels).To(HaveKey(e2eOldLabelName)) |
| 138 | + Expect(topologyControlPlane.Metadata.Annotations).To(HaveKey(e2eOldAnnotationName)) |
| 139 | + |
| 140 | + patchHelper, err := patch.NewHelper(cluster, mgmtClient) |
| 141 | + Expect(err).ToNot(HaveOccurred()) |
| 142 | + |
| 143 | + // Remove old label, and set an old annotation with new value. |
| 144 | + delete(topologyControlPlane.Metadata.Labels, e2eOldLabelName) |
| 145 | + topologyControlPlane.Metadata.Annotations[e2eOldAnnotationName] = e2eNewAnnotationValue |
| 146 | + |
| 147 | + Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) |
| 148 | + |
| 149 | + Byf("Waiting for labels and annotations of all controlplane machines to be updated.") |
| 150 | + Eventually(func(g Gomega) { |
| 151 | + controlPlaneMachineList := &clusterv1.MachineList{} |
| 152 | + g.Expect(mgmtClient.List(ctx, controlPlaneMachineList, client.InNamespace(clusterNamespace), client.MatchingLabels{ |
| 153 | + clusterv1.MachineControlPlaneLabel: "", |
| 154 | + clusterv1.ClusterNameLabel: cluster.Name, |
| 155 | + })).To(Succeed()) |
| 156 | + for _, m := range controlPlaneMachineList.Items { |
| 157 | + g.Expect(m.ObjectMeta.Labels).NotTo(HaveKey(e2eOldLabelName)) |
| 158 | + g.Expect(m.ObjectMeta.Annotations).To(HaveKeyWithValue(e2eOldAnnotationName, e2eNewAnnotationValue)) |
| 159 | + } |
| 160 | + }, e2eConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed()) |
| 161 | + }, |
| 162 | + } |
| 163 | + }) |
| 164 | +}) |
0 commit comments