# forked from rancher/fleet
# e2e-rancher-upgrade-fleet.yml — 223 lines (214 loc), 9.66 KB
---
# Upgrade Fleet in given Rancher versions to given Fleet release and run tests
name: E2E Upgrade Fleet in Rancher

on:
  workflow_dispatch:
    inputs:
      ref:
        description: "Checkout git branch/tag"
        required: true
        default: "main"
      k3s_version:
        # https://hub.docker.com/r/rancher/k3s/tags
        # k3d version list k3s | sed 's/+/-/' | sort -h
        description: "K3s version to use"
        required: true
        default: "v1.35.1-k3s1"
      rancher_version:
        description: "Rancher version to install"
        required: true
        default: "2.12.0"
      fleet_crd_url:
        description: "Fleet CRD chart URL from rancher/charts"
        required: true
        default: "https://github.com/rancher/charts/raw/dev-v2.12/assets/fleet-crd/fleet-crd-107.0.1+up0.13.1.tgz"
      fleet_url:
        description: "Fleet chart URL from rancher/charts"
        required: true
        default: "https://github.com/rancher/charts/raw/dev-v2.12/assets/fleet/fleet-107.0.1+up0.13.1.tgz"
      image_repo:
        description: "Fleet image repo, the image name fleet/fleet-agent is to be appended later"
        required: true
        default: "rancher"
      image_tag:
        description: "Fleet image tag"
        required: true
        default: "v0.13.1"

env:
  GOARCH: amd64
  CGO_ENABLED: 0
  SETUP_K3D_VERSION: 'v5.8.3'
jobs:
  rancher-fleet-upgrade:
    runs-on: ubuntu-latest
    env:
      # Single source of truth for the Rancher CLI release installed below.
      # Also embedded in the cache key so bumping the version invalidates
      # the cache (the key previously pinned "2.6.0" while the install step
      # downloaded v2.12.0, which could restore a stale cached CLI).
      RANCHER_CLI_VERSION: "v2.12.0"
    steps:
      - name: Check out repository
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0
          ref: ${{ github.event.inputs.ref }}
      - name: Set up Go
        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
        with:
          go-version-file: 'go.mod'
          check-latest: true
      - name: Install Ginkgo CLI
        run: go install github.com/onsi/ginkgo/v2/ginkgo
      - name: Determine cache key
        id: cache-key
        run: ./.github/scripts/determine-cache-key.sh
      - name: Cache crust-gather CLI
        id: cache-crust
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        with:
          path: ~/.local/bin/crust-gather
          key: ${{ runner.os }}-crust-gather-${{ steps.cache-key.outputs.value }}
          restore-keys: |
            ${{ runner.os }}-crust-gather-
      - name: Install crust-gather CLI
        run: |
          if [ "${{ steps.cache-crust.outputs.cache-hit }}" != "true" ]; then
            echo "Cache not found, downloading from source"
            mkdir -p ~/.local/bin
            if curl -sSfL https://github.com/crust-gather/crust-gather/raw/main/install.sh | sh -s -- --yes; then
              # Cache the binary for future runs
              if [ ! -f ~/.local/bin/crust-gather ]; then
                which crust-gather && cp $(which crust-gather) ~/.local/bin/
              fi
            else
              echo "Failed to download crust-gather"
              exit 1
            fi
          else
            echo "Using cached crust-gather CLI"
            chmod +x ~/.local/bin/crust-gather
            sudo ln -sf ~/.local/bin/crust-gather /usr/local/bin/
          fi
      - name: Cache Rancher CLI
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        id: rancher-cli-cache
        with:
          path: /home/runner/.local/bin
          # Key tracks the version actually installed by the step below.
          key: ${{ runner.os }}-rancher-cli-${{ env.RANCHER_CLI_VERSION }}
      - name: Install Rancher CLI
        if: steps.rancher-cli-cache.outputs.cache-hit != 'true'
        run: |
          mkdir -p /home/runner/.local/bin
          wget -q "https://github.com/rancher/cli/releases/download/${RANCHER_CLI_VERSION}/rancher-linux-amd64-${RANCHER_CLI_VERSION}.tar.gz"
          tar -xz --strip-components=2 -f "rancher-linux-amd64-${RANCHER_CLI_VERSION}.tar.gz" -C /home/runner/.local/bin
          rancher --version
      - name: Install k3d
        run: curl --silent --fail https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=${{ env.SETUP_K3D_VERSION }} bash
      - name: Set up k3d control-plane cluster
        run: |
          k3d cluster create upstream --wait \
            -p "80:80@agent:0:direct" \
            -p "443:443@agent:0:direct" \
            --api-port 6443 \
            --agents 1 \
            --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
            --k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*' \
            --network "nw01" \
            --image docker.io/rancher/k3s:${{ github.event.inputs.k3s_version }} \
            --k3s-arg "--cluster-init@server:0"
      - name: Set up k3d downstream cluster
        run: |
          k3d cluster create downstream --wait \
            -p "81:80@agent:0:direct" \
            -p "444:443@agent:0:direct" \
            --api-port 6644 \
            --agents 1 \
            --k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@agent:*' \
            --k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@agent:*' \
            --network "nw01" \
            --image docker.io/rancher/k3s:${{ github.event.inputs.k3s_version }} \
            --k3s-arg "--cluster-init@server:0"
      - name: Set up Rancher
        env:
          public_hostname: "172.18.0.1.sslip.io"
        run: |
          ./.github/scripts/setup-rancher.sh "${{ github.event.inputs.rancher_version }}"
          ./.github/scripts/wait-for-loadbalancer.sh
          ./.github/scripts/register-downstream-clusters.sh
          # give downstream cluster registration time to settle before labeling
          sleep 30
          ./.github/scripts/label-downstream-cluster.sh
      - name: Create example workload
        run: |
          kubectl apply -n fleet-local -f e2e/assets/fleet-upgrade/gitrepo-simple.yaml
          kubectl apply -n fleet-default -f e2e/assets/fleet-upgrade/gitrepo-simple.yaml
          # wait for bundle ready
          until kubectl get bundles -n fleet-local test-simple-simple-chart -o=jsonpath='{.status.conditions[?(@.type=="Ready")].status}' | grep -q "True"; do sleep 3; done
          until kubectl get bundles -n fleet-default test-simple-simple-chart -o=jsonpath='{.status.conditions[?(@.type=="Ready")].status}' | grep -q "True"; do sleep 3; done
      - name: Deploy latest fleet
        env:
          fleet_crd_url: ${{ github.event.inputs.fleet_crd_url }}
          fleet_url: ${{ github.event.inputs.fleet_url }}
          image_repo: ${{ github.event.inputs.image_repo }}
          image_tag: ${{ github.event.inputs.image_tag }}
          fleetns: "cattle-fleet-system"
        run: |
          helm upgrade fleet-crd "$fleet_crd_url" --wait -n "$fleetns"
          until helm -n "$fleetns" status fleet-crd | grep -q "STATUS: deployed"; do echo waiting for original fleet-crd chart to be deployed; sleep 1; done
          # need to repeat some defaults, because of --reuse-values
          helm upgrade fleet "$fleet_url" \
            --wait -n "$fleetns" \
            --reuse-values \
            --set image.repository="$image_repo/fleet" \
            --set image.tag="$image_tag" \
            --set agentImage.repository="$image_repo/fleet-agent" \
            --set agentImage.tag="$image_tag" \
            --set leaderElection.leaseDuration=30s --set leaderElection.retryPeriod=10s --set leaderElection.renewDeadline=25s
          until helm -n "$fleetns" status fleet | grep -q "STATUS: deployed"; do echo waiting for original fleet chart to be deployed; sleep 3; done
          kubectl -n "$fleetns" rollout status deploy/fleet-controller
          # wait for bundle update
          # NOTE(review): these greps previously hard-coded the "rancher" repo
          # and would loop forever whenever the image_repo input is overridden;
          # they now match the repo actually passed to helm above.
          until kubectl get bundles -n fleet-local fleet-agent-local -ojsonpath='{.spec.resources}' | grep -q "image: $image_repo/fleet-agent:$image_tag"; do sleep 3; done
          until kubectl get bundles -n fleet-default -ojsonpath='{.items[*].spec.resources}' | grep -q "image: $image_repo/fleet-agent:$image_tag"; do sleep 3; done
          # wait for fleet agent bundles
          until kubectl get bundles -n fleet-local | grep -q -E "fleet-agent-local.*1/1"; do echo "waiting for local agent bundle"; sleep 1; done
          until kubectl get bundles -n fleet-default | grep -q -E "fleet-agent-c.*1/1"; do echo "waiting for agent bundle"; sleep 1; done
      - name: Verify Installation
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_VERSION: ${{ github.event.inputs.image_tag }}
          FLEET_LOCAL_AGENT_NAMESPACE: "cattle-fleet-local-system"
          FLEET_AGENT_NAMESPACE: "cattle-fleet-system"
        run: |
          # this doesn't work with <0.10
          ginkgo --github-output --trace --label-filter='!single-cluster' e2e/installation
      - name: E2E tests for examples
        env:
          FLEET_E2E_NS: fleet-local
          FLEET_E2E_NS_DOWNSTREAM: fleet-default
        run: |
          # Force use of non-managed downstream cluster for portability
          export CI_REGISTERED_CLUSTER=$(kubectl get clusters.fleet.cattle.io -n $FLEET_E2E_NS_DOWNSTREAM -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | grep -v second)
          kubectl config use-context k3d-upstream
          ginkgo --github-output --trace e2e/multi-cluster
      - name: Dump Failed Downstream Environment
        if: failure()
        run: |
          kubectl config use-context k3d-downstream
          crust-gather collect --exclude-namespace=kube-system --exclude-kind=Lease --duration=5s -f tmp/downstream
      - name: Upload logs
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        if: failure()
        with:
          name: gha-fleet-upgrade-rancher-logs-${{ github.event.inputs.rancher_version }}-${{ github.event.inputs.k3s_version }}-${{ github.sha }}-${{ github.run_id }}
          path: |
            tmp/downstream
            tmp/upstream
          retention-days: 2