4
4
5
5
:_mod-docs-content-type: PROCEDURE
6
6
[id="dr-hosted-cluster-within-aws-region-backup_{context}"]
7
- = Backing up a hosted cluster
7
+ = Backing up a hosted cluster on {aws-short}
8
8
9
9
To recover your hosted cluster in your target management cluster, you first need to back up all of the relevant data.
10
10
11
11
.Procedure
12
12
13
- . Create a configmap file to declare the source management cluster by entering this command:
13
+ . Create a config map file to declare the source management cluster by entering the following command:
14
14
+
15
15
[source,terminal]
16
16
----
17
17
$ oc create configmap mgmt-parent-cluster -n default \
18
18
--from-literal=from=${MGMT_CLUSTER_NAME}
19
19
----
20
20
21
- . Shut down the reconciliation in the hosted cluster and in the node pools by entering these commands:
21
+ . Shut down the reconciliation in the hosted cluster and in the node pools by entering the following commands:
22
22
+
23
23
[source,terminal]
24
24
----
25
25
$ PAUSED_UNTIL="true"
26
- $ oc patch -n ${HC_CLUSTER_NS} hostedclusters/${HC_CLUSTER_NAME} \
27
- -p '{"spec":{"pausedUntil":"' ${PAUSED_UNTIL} '"}}' -- type=merge
28
- $ oc scale deployment -n ${HC_CLUSTER_NS} -${HC_CLUSTER_NAME} -- replicas=0 \
29
- kube-apiserver openshift-apiserver openshift-oauth-apiserver control-plane-operator
30
26
----
31
27
+
32
28
[source,terminal]
33
29
----
34
- $ PAUSED_UNTIL="true"
35
30
$ oc patch -n ${HC_CLUSTER_NS} hostedclusters/${HC_CLUSTER_NAME} \
36
31
-p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge
32
+ ----
33
+ +
34
+ [source,terminal]
35
+ ----
37
36
$ oc patch -n ${HC_CLUSTER_NS} nodepools/${NODEPOOLS} \
38
37
-p '{"spec":{"pausedUntil":"'${PAUSED_UNTIL}'"}}' --type=merge
38
+ ----
39
+ +
40
+ [source,terminal]
41
+ ----
39
42
$ oc scale deployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --replicas=0 \
40
43
kube-apiserver openshift-apiserver openshift-oauth-apiserver control-plane-operator
41
44
----
42
45
43
- . Back up etcd and upload the data to an S3 bucket by running this bash script:
46
+ . Back up etcd and upload the data to an S3 bucket by running the following bash script:
44
47
+
45
48
[TIP]
46
49
====
@@ -93,82 +96,189 @@ For more information about backing up etcd, see "Backing up and restoring etcd o
93
96
* `MachineDeployments`, `MachineSets`, and `Machines` from the Hosted Control Plane namespace
94
97
* `ControlPlane` secrets from the Hosted Control Plane namespace
95
98
+
99
+ .. Enter the following commands:
100
+ +
96
101
[source,terminal]
97
102
----
98
103
$ mkdir -p ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS} \
99
104
${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}
105
+ ----
106
+ +
107
+ [source,terminal]
108
+ ----
100
109
$ chmod 700 ${BACKUP_DIR}/namespaces/
101
-
102
- # HostedCluster
110
+ ----
111
+ +
112
+ .. Back up the `HostedCluster` objects from the `HostedCluster` namespace by entering the following commands:
113
+ +
114
+ [source,terminal]
115
+ ----
103
116
$ echo "Backing Up HostedCluster Objects:"
104
- $ oc get hc ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml
117
+ ----
118
+ +
119
+ [source,terminal]
120
+ ----
121
+ $ oc get hc ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS} -o yaml > \
122
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml
123
+ ----
124
+ +
125
+ [source,terminal]
126
+ ----
105
127
$ echo "--> HostedCluster"
106
- $ sed -i '' -e '/^status:$/,$d' ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml
107
-
108
- # NodePool
109
- $ oc get np ${NODEPOOLS} -n ${HC_CLUSTER_NS} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml
128
+ ----
129
+ +
130
+ [source,terminal]
131
+ ----
132
+ $ sed -i '' -e '/^status:$/,$d' \
133
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/hc-${HC_CLUSTER_NAME}.yaml
134
+ ----
135
+ +
136
+ .. Back up the `NodePool` objects from the `HostedCluster` namespace by entering the following commands:
137
+ +
138
+ [source,terminal]
139
+ ----
140
+ $ oc get np ${NODEPOOLS} -n ${HC_CLUSTER_NS} -o yaml > \
141
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml
142
+ ----
143
+ +
144
+ [source,terminal]
145
+ ----
110
146
$ echo "--> NodePool"
111
- $ sed -i '' -e '/^status:$/,$ d' ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml
112
-
113
- # Secrets in the HC Namespace
147
+ ----
148
+ +
149
+ [source,terminal]
150
+ ----
151
+ $ sed -i '' -e '/^status:$/,$ d' \
152
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/np-${NODEPOOLS}.yaml
153
+ ----
154
+ +
155
+ .. Back up the secrets in the `HostedCluster` namespace by running the following shell script:
156
+ +
157
+ [source,terminal]
158
+ ----
114
159
$ echo "--> HostedCluster Secrets:"
115
160
for s in $(oc get secret -n ${HC_CLUSTER_NS} | grep "^${HC_CLUSTER_NAME}" | awk '{print $1}'); do
116
161
oc get secret -n ${HC_CLUSTER_NS} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}/secret-${s}.yaml
117
162
done
118
-
119
- # Secrets in the HC Control Plane Namespace
163
+ ----
164
+ +
165
+ .. Back up the secrets in the `HostedCluster` control plane namespace by running the following shell script:
166
+ +
167
+ [source,terminal]
168
+ ----
120
169
$ echo "--> HostedCluster ControlPlane Secrets:"
121
170
for s in $(oc get secret -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} | egrep -v "docker|service-account-token|oauth-openshift|NAME|token-${HC_CLUSTER_NAME}" | awk '{print $1}'); do
122
171
oc get secret -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/secret-${s}.yaml
123
172
done
124
-
125
- # Hosted Control Plane
173
+ ----
174
+ +
175
+ .. Back up the hosted control plane by entering the following commands:
176
+ +
177
+ [source,terminal]
178
+ ----
126
179
$ echo "--> HostedControlPlane:"
127
- $ oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-${HC_CLUSTER_NAME}.yaml
128
-
129
- # Cluster
180
+ ----
181
+ +
182
+ [source,terminal]
183
+ ----
184
+ $ oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > \
185
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/hcp-${HC_CLUSTER_NAME}.yaml
186
+ ----
187
+ +
188
+ .. Back up the cluster by entering the following commands:
189
+ +
190
+ [source,terminal]
191
+ ----
130
192
$ echo "--> Cluster:"
131
- $ CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME})
132
- $ oc get cluster ${CL_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-${HC_CLUSTER_NAME}.yaml
133
-
134
- # AWS Cluster
193
+ ----
194
+ +
195
+ [source,terminal]
196
+ ----
197
+ $ CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} \
198
+ -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME})
199
+ ----
200
+ +
201
+ [source,terminal]
202
+ ----
203
+ $ oc get cluster ${CL_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > \
204
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/cl-${HC_CLUSTER_NAME}.yaml
205
+ ----
206
+ +
207
+ .. Back up the {aws-short} cluster by entering the following commands:
208
+ +
209
+ [source,terminal]
210
+ ----
135
211
$ echo "--> AWS Cluster:"
136
- $ oc get awscluster ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-${HC_CLUSTER_NAME}.yaml
137
-
138
- # AWS MachineTemplate
212
+ ----
213
+ +
214
+ [source,terminal]
215
+ ----
216
+ $ oc get awscluster ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > \
217
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awscl-${HC_CLUSTER_NAME}.yaml
218
+ ----
219
+ +
220
+ .. Back up the {aws-short} `MachineTemplate` objects by entering the following commands:
221
+ +
222
+ [source,terminal]
223
+ ----
139
224
$ echo "--> AWS Machine Template:"
140
- $ oc get awsmachinetemplate ${NODEPOOLS} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-${HC_CLUSTER_NAME}.yaml
141
-
142
- # AWS Machines
225
+ ----
226
+ +
227
+ [source,terminal]
228
+ ----
229
+ $ oc get awsmachinetemplate ${NODEPOOLS} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o yaml > \
230
+ ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsmt-${HC_CLUSTER_NAME}.yaml
231
+ ----
232
+ +
233
+ .. Back up the {aws-short} `Machines` objects by running the following shell script:
234
+ +
235
+ [source,terminal]
236
+ ----
143
237
$ echo "--> AWS Machine:"
238
+ ----
239
+ +
240
+ [source,terminal]
241
+ ----
144
242
$ CL_NAME=$(oc get hcp ${HC_CLUSTER_NAME} -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o jsonpath={.metadata.labels.\*} | grep ${HC_CLUSTER_NAME})
145
243
for s in $(oc get awsmachines -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --no-headers | grep ${CL_NAME} | cut -f1 -d\ ); do
146
244
oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} awsmachines $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/awsm-${s}.yaml
147
245
done
148
-
149
- # MachineDeployments
246
+ ----
247
+ +
248
+ .. Back up the `MachineDeployments` objects by running the following shell script:
249
+ +
250
+ [source,terminal]
251
+ ----
150
252
$ echo "--> HostedCluster MachineDeployments:"
151
253
for s in $(oc get machinedeployment -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do
152
254
mdp_name=$(echo ${s} | cut -f 2 -d /)
153
255
oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machinedeployment-${mdp_name}.yaml
154
256
done
155
-
156
- # MachineSets
257
+ ----
258
+ +
259
+ .. Back up the `MachineSets` objects by running the following shell script:
260
+ +
261
+ [source,terminal]
262
+ ----
157
263
$ echo "--> HostedCluster MachineSets:"
158
264
for s in $(oc get machineset -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do
159
265
ms_name=$(echo ${s} | cut -f 2 -d /)
160
266
oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machineset-${ms_name}.yaml
161
267
done
162
-
163
- # Machines
268
+ ----
269
+ +
270
+ .. Back up the `Machines` objects from the Hosted Control Plane namespace by running the following shell script:
271
+ +
272
+ [source,terminal]
273
+ ----
164
274
$ echo "--> HostedCluster Machine:"
165
275
for s in $(oc get machine -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} -o name); do
166
276
m_name=$(echo ${s} | cut -f 2 -d /)
167
277
oc get -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} $s -o yaml > ${BACKUP_DIR}/namespaces/${HC_CLUSTER_NS}-${HC_CLUSTER_NAME}/machine-${m_name}.yaml
168
278
done
169
279
----
170
280
171
- . Clean up the `ControlPlane` routes by entering this command:
281
+ . Clean up the `ControlPlane` routes by entering the following command:
172
282
+
173
283
[source,terminal]
174
284
----
@@ -177,7 +287,7 @@ $ oc delete routes -n ${HC_CLUSTER_NS}-${HC_CLUSTER_NAME} --all
177
287
+
178
288
By entering that command, you enable the ExternalDNS Operator to delete the Route53 entries.
179
289
180
- . Verify that the Route53 entries are clean by running this script:
290
+ . Verify that the Route53 entries are clean by running the following script:
181
291
+
182
292
[source,terminal]
183
293
----
@@ -226,4 +336,4 @@ Check all of the {product-title} objects and the S3 bucket to verify that everyt
226
336
227
337
.Next steps
228
338
229
- Restore your hosted cluster.
339
+ Restore your hosted cluster.
0 commit comments