Commit f7d2499

Add clusterctl upgrade test (#138)
Signed-off-by: nasusoba <[email protected]>

Parent: 71bc0af

File tree: 8 files changed (+228 -42 lines)

test/e2e/README.md (+2 -6)

@@ -14,12 +14,6 @@ To run a specific e2e test, such as `[PR-Blocking]`, use the `GINKGO_FOCUS` environment variable
 ```shell
 make GINKGO_FOCUS="\\[PR-Blocking\\]" test-e2e # only run e2e test with `[PR-Blocking]` in its spec name
 ```
-### Run the e2e test with tilt
-It is quite useful to run the e2e test with [tilt](https://cluster-api.sigs.k8s.io/developer/tilt), so that you will not need to rebuild docker image with `make docker-build-e2e` everytime. Also you will not need to wait a new cluster creation and setup. If you have set up your tilt cluster and made the current context points to this cluster, you could run:
-```shell
-# running e2e for the cluster pointed by the current context
-make USE_EXISTING_CLUSTER=true test-e2e
-```
 ## Develop an e2e test
 You could refer to [Developing E2E tests](https://cluster-api.sigs.k8s.io/developer/e2e) for a complete guide for developing e2e tests.

@@ -32,3 +26,5 @@ A guide for developing a k3s e2e test:

 ## Troubleshooting
 * [Cluster API with Docker - "too many open files".](https://cluster-api.sigs.k8s.io/user/troubleshooting.html?highlight=too%20many#cluster-api-with-docker----too-many-open-files)
+* invalid provider metadata
+  * If you see the error `invalid provider metadata: version v1.8.99 for the provider capd-system/infrastructure-docker does not match any release series`, it might be that the artifact you are using is outdated. Please remove the `_artifacts` folder and try again.
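As a quick illustration of that workaround (not part of the commit), a minimal sketch assuming the stale artifacts live in the default `_artifacts` folder at the repository root and that you re-run with the same `GINKGO_FOCUS` pattern shown in the README:

```shell
# Clear cached e2e artifacts that may hold outdated provider manifests,
# then re-run the focused e2e suite so fresh components are fetched.
rm -rf _artifacts
make GINKGO_FOCUS="\\[PR-Blocking\\]" test-e2e
```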

test/e2e/clusterctl_upgrade_test.go (new file, +164)

@@ -0,0 +1,164 @@
//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	. "github.com/onsi/gomega/gstruct"

	"k8s.io/utils/ptr"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	e2eOldLabelName       = "Cluster.topology.controlPlane.oldLabel"
	e2eOldAnnotationName  = "Cluster.topology.controlPlane.oldAnnotation"
	e2eNewAnnotationValue = "newAnnotationValue"
	kcpManagerName        = "capi-kthreescontrolplane"
)

var (
	clusterctlDownloadURL = "https://github.com/kubernetes-sigs/cluster-api/releases/download/v%s/clusterctl-{OS}-{ARCH}"
	providerCAPIPrefix    = "cluster-api:v%s"
	providerKThreesPrefix = "k3s:v%s"
	providerDockerPrefix  = "docker:v%s"
)

var _ = Describe("When testing clusterctl upgrades using ClusterClass (v0.2.0=>current) [ClusterClass]", func() {
	// Upgrade from v0.2.0 to current (current version is built from source).
	var (
		specName                = "clusterctl-upgrade"
		version                 = "0.2.0"
		k3sCapiUpgradedVersion  string
		capiCoreVersion         string
		capiCoreUpgradedVersion string
	)
	BeforeEach(func() {
		Expect(e2eConfig.Variables).To(HaveKey(K3sCapiCurrentVersion))
		Expect(e2eConfig.Variables).To(HaveKey(CapiCoreVersion))

		// Will upgrade k3s CAPI from v0.2.0 to k3sCapiUpgradedVersion.
		k3sCapiUpgradedVersion = e2eConfig.GetVariable(K3sCapiCurrentVersion)

		// Will init other CAPI core/CAPD components with CapiCoreVersion, and then upgrade to CapiCoreUpgradedVersion.
		// For now, these two versions are equal.
		capiCoreVersion = e2eConfig.GetVariable(CapiCoreVersion)
		capiCoreUpgradedVersion = capiCoreVersion
	})

	capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
		return capi_e2e.ClusterctlUpgradeSpecInput{
			E2EConfig:                       e2eConfig,
			ClusterctlConfigPath:            clusterctlConfigPath,
			BootstrapClusterProxy:           bootstrapClusterProxy,
			ArtifactFolder:                  artifactFolder,
			SkipCleanup:                     skipCleanup,
			InfrastructureProvider:          ptr.To("docker"),
			InitWithBinary:                  fmt.Sprintf(clusterctlDownloadURL, capiCoreVersion),
			InitWithCoreProvider:            fmt.Sprintf(providerCAPIPrefix, capiCoreVersion),
			InitWithBootstrapProviders:      []string{fmt.Sprintf(providerKThreesPrefix, version)},
			InitWithControlPlaneProviders:   []string{fmt.Sprintf(providerKThreesPrefix, version)},
			InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, capiCoreVersion)},
			InitWithProvidersContract:       "v1beta1",
			// InitWithKubernetesVersion is for the management cluster, WorkloadKubernetesVersion is for the workload cluster.
			// Hardcoding the versions as later versions of k3s might not be compatible with the older versions of CAPI k3s.
			InitWithKubernetesVersion:   "v1.30.0",
			WorkloadKubernetesVersion:   "v1.30.2+k3s2",
			MgmtFlavor:                  "topology",
			WorkloadFlavor:              "topology",
			UseKindForManagementCluster: true,
			// Configuration for the provider upgrades.
			Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{
				{
					// CAPI core or CAPD with compatible version.
					CoreProvider:            fmt.Sprintf(providerCAPIPrefix, capiCoreUpgradedVersion),
					InfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, capiCoreUpgradedVersion)},
					// Upgrade to current k3s.
					BootstrapProviders:    []string{fmt.Sprintf(providerKThreesPrefix, k3sCapiUpgradedVersion)},
					ControlPlaneProviders: []string{fmt.Sprintf(providerKThreesPrefix, k3sCapiUpgradedVersion)},
				},
			},
			// After the k3s CAPI upgrade, this tests that the in-place mutable
			// fields can be updated correctly. It complements the
			// inplace_rollout_test to include the k3s CAPI upgrade scenario.
			// We are testing upgrading from v0.2.0 as we do not support SSA
			// before v0.2.0.
			PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) {
				clusterList := &clusterv1.ClusterList{}
				mgmtClient := managementClusterProxy.GetClient()

				if err := mgmtClient.List(ctx, clusterList, client.InNamespace(clusterNamespace)); err != nil {
					Expect(err).NotTo(HaveOccurred())
				}
				Expect(clusterList.Items).To(HaveLen(1), fmt.Sprintf("Expected to have only one cluster in the namespace %s", clusterNamespace))

				cluster := &clusterList.Items[0]

				Byf("Waiting for the new controller to reconcile at least once, to set the managed fields with the k3s kcpManagerName for all control plane machines.")
				Eventually(func(g Gomega) {
					controlPlaneMachineList := &clusterv1.MachineList{}
					g.Expect(mgmtClient.List(ctx, controlPlaneMachineList, client.InNamespace(clusterNamespace), client.MatchingLabels{
						clusterv1.MachineControlPlaneLabel: "",
						clusterv1.ClusterNameLabel:         cluster.Name,
					})).To(Succeed())
					for _, m := range controlPlaneMachineList.Items {
						g.Expect(m.ObjectMeta.ManagedFields).To(ContainElement(MatchFields(IgnoreExtras, Fields{
							"Manager": Equal(kcpManagerName),
						})))
					}
				}, e2eConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed())

				Byf("Modifying the control plane label and annotations of Cluster %s", cluster.Name)
				topologyControlPlane := cluster.Spec.Topology.ControlPlane
				Expect(topologyControlPlane.Metadata.Labels).To(HaveKey(e2eOldLabelName))
				Expect(topologyControlPlane.Metadata.Annotations).To(HaveKey(e2eOldAnnotationName))

				patchHelper, err := patch.NewHelper(cluster, mgmtClient)
				Expect(err).ToNot(HaveOccurred())

				// Remove the old label, and set the old annotation to a new value.
				delete(topologyControlPlane.Metadata.Labels, e2eOldLabelName)
				topologyControlPlane.Metadata.Annotations[e2eOldAnnotationName] = e2eNewAnnotationValue

				Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())

				Byf("Waiting for labels and annotations of all control plane machines to be updated.")
				Eventually(func(g Gomega) {
					controlPlaneMachineList := &clusterv1.MachineList{}
					g.Expect(mgmtClient.List(ctx, controlPlaneMachineList, client.InNamespace(clusterNamespace), client.MatchingLabels{
						clusterv1.MachineControlPlaneLabel: "",
						clusterv1.ClusterNameLabel:         cluster.Name,
					})).To(Succeed())
					for _, m := range controlPlaneMachineList.Items {
						g.Expect(m.ObjectMeta.Labels).NotTo(HaveKey(e2eOldLabelName))
						g.Expect(m.ObjectMeta.Annotations).To(HaveKeyWithValue(e2eOldAnnotationName, e2eNewAnnotationValue))
					}
				}, e2eConfig.GetIntervals(specName, "wait-control-plane")...).Should(Succeed())
			},
		}
	})
})
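For illustration (not part of the commit), the new spec can be run on its own with the `GINKGO_FOCUS` pattern from test/e2e/README.md; the focus string below is a hypothetical choice that simply matches part of the Describe text above, since Ginkgo matches the focus regex against the full spec description:

```shell
# Run only the clusterctl upgrade spec; the focus string matches the
# "When testing clusterctl upgrades using ClusterClass ..." Describe text.
make GINKGO_FOCUS="clusterctl upgrades" test-e2e
```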

test/e2e/common.go (+2)

@@ -43,6 +43,8 @@ const (
 	WorkersMachineTemplateUpgradeTo = "WORKERS_MACHINE_TEMPLATE_UPGRADE_TO"
 	IPFamily                        = "IP_FAMILY"
 	KindImageVersion                = "KIND_IMAGE_VERSION"
+	CapiCoreVersion                 = "CAPI_CORE_VERSION"
+	K3sCapiCurrentVersion           = "K3S_CAPI_CURRENT_VERSION"
 )

 func Byf(format string, a ...interface{}) {

test/e2e/config/k3s-docker.yaml (+39 -32)

@@ -15,9 +15,10 @@ providers:
 - name: cluster-api
   type: CoreProvider
   versions:
-  - name: v1.7.2
-    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/core-components.yaml
+  - name: v1.8.1
+    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.1/core-components.yaml
     type: url
+    contract: v1beta1
     files:
     - sourcePath: "../data/shared/v1beta1/metadata.yaml"
     replacements:

@@ -26,25 +27,12 @@ providers:
 - name: docker
   type: InfrastructureProvider
   versions:
-  # By default, will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
+  # Will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
   # to init the management cluster
-  - name: v1.7.2 # used during e2e-test
-    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/infrastructure-components-development.yaml
-    type: url
-    files:
-    - sourcePath: "../data/shared/v1beta1/metadata.yaml"
-    replacements:
-    - old: "imagePullPolicy: Always"
-      new: "imagePullPolicy: IfNotPresent"
-
-  # Add v1.8.99 to support tilt (not presented in ../data/shared/v1beta1/metadata.yaml)
-  # when bootstrapping with tilt, it will use
-  # the defaultProviderVersion in https://github.com/kubernetes-sigs/cluster-api/blob/main/hack/tools/internal/tilt-prepare/main.go as
-  # default version for docker infrastructure provider
-  # name here should match defaultProviderVersion
-  - name: v1.8.99 # next; use manifest from source files
-    value: https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/infrastructure-components-development.yaml
+  - name: v1.8.1 # used during e2e-test
+    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.8.1/infrastructure-components-development.yaml
     type: url
+    contract: v1beta1
     files:
     - sourcePath: "../data/shared/v1beta1/metadata.yaml"
     replacements:

@@ -60,31 +48,50 @@ providers:
 - name: k3s
   type: BootstrapProvider
   versions:
-  # Could add older release version for upgrading test, but
-  # by default, will only use the latest version defined in
-  # ${ProjectRoot}/metadata.yaml to init the management cluster
+  # Older release is added for k3s provider upgrading test (clusterctl_upgrade_test),
+  # all other tests will use the k3s build from source
+  # to init the management cluster.
+  - name: "v0.2.0"
+    value: "https://github.com/k3s-io/cluster-api-k3s/releases/download/v0.2.0/bootstrap-components.yaml"
+    type: "url"
+    contract: v1beta1
+    files:
+    - sourcePath: "../data/shared/k3s/v0.2/metadata.yaml"
+      targetName: "metadata.yaml"
+  # By default, will only use the latest version defined in
+  # ${ProjectRoot}/metadata.yaml (this one) to init the management cluster
   # this version should be updated when ${ProjectRoot}/metadata.yaml
   # is modified
   - name: v0.2.99 # next; use manifest from source files
     value: "../../../bootstrap/config/default"
-    files:
-    - sourcePath: "../../../metadata.yaml"
-      targetName: "metadata.yaml"
+    files:
+    - sourcePath: "../../../metadata.yaml"
+      targetName: "metadata.yaml"
 - name: k3s
   type: ControlPlaneProvider
   versions:
+  - name: "v0.2.0"
+    value: "https://github.com/k3s-io/cluster-api-k3s/releases/download/v0.2.0/control-plane-components.yaml"
+    type: "url"
+    contract: v1beta1
+    files:
+    - sourcePath: "../data/shared/k3s/v0.2/metadata.yaml"
+      targetName: "metadata.yaml"
   - name: v0.2.99 # next; use manifest from source files
     value: "../../../controlplane/config/default"
-    files:
-    - sourcePath: "../../../metadata.yaml"
-      targetName: "metadata.yaml"
+    files:
+    - sourcePath: "../../../metadata.yaml"
+      targetName: "metadata.yaml"

 variables:
-  KUBERNETES_VERSION_MANAGEMENT: "v1.28.0"
-  KUBERNETES_VERSION: "v1.28.6+k3s2"
-  KUBERNETES_VERSION_UPGRADE_TO: "v1.28.7+k3s1"
+  KUBERNETES_VERSION_MANAGEMENT: "v1.30.0"
+  KUBERNETES_VERSION: "v1.30.2+k3s2"
+  KUBERNETES_VERSION_UPGRADE_TO: "v1.30.3+k3s1"
   IP_FAMILY: "IPv4"
-  KIND_IMAGE_VERSION: "v1.28.0"
+  KIND_IMAGE_VERSION: "v1.30.0"
+  # Used during clusterctl upgrade test
+  CAPI_CORE_VERSION: "1.8.1"
+  K3S_CAPI_CURRENT_VERSION: "0.2.99"
   # Enabling the feature flags by setting the env variables.
   CLUSTER_TOPOLOGY: "true"
   EXP_MACHINE_POOL: "true"

test/e2e/data/infrastructure-docker/bases/cluster-with-topology.yaml (+8)

@@ -16,6 +16,14 @@ spec:
     class: k3s
     version: ${KUBERNETES_VERSION}
     controlPlane:
+      metadata:
+        # These labels are used by clusterctl_upgrade_test to test
+        # labels added previous to supporting SSA could be modified
+        # or deleted.
+        labels:
+          Cluster.topology.controlPlane.oldLabel: "Cluster.topology.controlPlane.oldLabelValue"
+        annotations:
+          Cluster.topology.controlPlane.oldAnnotation: "Cluster.topology.controlPlane.oldAnnotationValue"
       nodeDeletionTimeout: "30s"
       nodeVolumeDetachTimeout: "5m"
       replicas: ${CONTROL_PLANE_MACHINE_COUNT}
test/e2e/data/shared/k3s/v0.2/metadata.yaml (new file, +9; path not shown in this view and inferred from the sourcePath reference in k3s-docker.yaml above)

@@ -0,0 +1,9 @@
apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
kind: Metadata
releaseSeries:
  - major: 0
    minor: 1
    contract: v1beta1
  - major: 0
    minor: 2
    contract: v1beta1

test/e2e/data/shared/v1beta1/metadata.yaml (+1 -1)

@@ -5,5 +5,5 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
   - major: 1
-    minor: 7
+    minor: 8
     contract: v1beta1

test/e2e/inplace_rollout_test.go (+3 -3)

@@ -49,7 +49,7 @@ import (
 // setting on ControlPlane object could be rollout to underlying machines.
 // The original test does not apply to k3s cluster as it modified controlPlane fields specific to KubeadmControlPlane.
 // Link to CAPI clusterclass_rollout test: https://github.com/kubernetes-sigs/cluster-api/blob/main/test/e2e/clusterclass_rollout.go
-var _ = Describe("Inplace mutable fields rollout test", func() {
+var _ = Describe("Inplace mutable fields rollout test [ClusterClass]", func() {
 	var (
 		ctx      = context.TODO()
 		specName = "inplace-rollout"

@@ -151,7 +151,7 @@ type modifyControlPlaneViaClusterAndWaitInput struct {
 }

 // modifyControlPlaneViaClusterAndWait modifies the ControlPlaneTopology of a Cluster topology via ModifyControlPlaneTopology.
-// It then waits until the changes are rolled out to the ControlPlane of the Cluster.
+// It then waits until the changes are rolled out to the ControlPlane and ControlPlane Machine of the Cluster.
 func modifyControlPlaneViaClusterAndWait(ctx context.Context, input modifyControlPlaneViaClusterAndWaitInput) {
 	Expect(ctx).NotTo(BeNil(), "ctx is required for modifyControlPlaneViaClusterAndWait")
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling modifyControlPlaneViaClusterAndWait")

@@ -167,7 +167,7 @@ func modifyControlPlaneViaClusterAndWait(ctx context.Context, input modifyControlPlaneViaClusterAndWaitInput) {
 	input.ModifyControlPlaneTopology(&input.Cluster.Spec.Topology.ControlPlane)
 	Expect(patchHelper.Patch(ctx, input.Cluster)).To(Succeed())

-	// NOTE: We only wait until the change is rolled out to the control plane object and not to the control plane machines.
+	// NOTE: We wait until the change is rolled out to the control plane object and the control plane machines.
 	Byf("Waiting for control plane rollout to complete.")
 	Eventually(func(g Gomega) {
 		// Get the ControlPlane.
