This repository was archived by the owner on Sep 30, 2020. It is now read-only.

Commit 1fb65f7

Make kube-aws node-pools update not fail
We've been denying all updates to the cfn stacks for node pools via the stack policy, so node-pools update could never succeed.
1 parent 642c504 commit 1fb65f7
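For context: a CloudFormation stack policy with no Allow statement leaves every resource in the stack protected, so any update is rejected, which is why node-pools update kept failing. As a workaround for node pool stacks created before this fix, the policy can be replaced by hand with the AWS CLI. This is only a sketch; the stack name and policy file path below are placeholders, not values from this repository.

# Sketch only: attach an allow-updates policy to an existing node pool stack.
# The stack name and file path are placeholders.
cat > /tmp/allow-updates.json <<'EOF'
{
  "Statement" : [
    {
      "Effect" : "Allow",
      "Principal" : "*",
      "Action" : "Update:*",
      "Resource" : "*"
    }
  ]
}
EOF

aws cloudformation set-stack-policy \
  --stack-name my-cluster-nodepool1 \
  --stack-policy-body file:///tmp/allow-updates.json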

File tree

2 files changed: +56 -11 lines changed

  e2e/run
  nodepool/cluster/cluster.go

e2e/run (+44 -10)

@@ -71,6 +71,17 @@ configure() {
   echo 'createRecordSet: true' >> cluster.yaml
 
   # required to run kube-aws update
+  customize_worker
+
+  ${KUBE_AWS_CMD} render
+
+  ${KUBE_AWS_CMD} validate --s3-uri ${KUBE_AWS_S3_URI}
+
+  echo Generated configuration files in ${WORK_DIR}:
+  find .
+}
+
+customize_worker() {
   echo 'workerCount: 2' >> cluster.yaml
   echo 'controllerCount: 2' >> cluster.yaml
   echo -e 'experimental:\n  nodeDrainer:\n    enabled: true' >> cluster.yaml
@@ -86,13 +97,6 @@ configure() {
   if [ "${KUBE_AWS_USE_CALICO}" != "" ]; then
     echo 'useCalico: true' >> cluster.yaml
   fi
-
-  ${KUBE_AWS_CMD} render
-
-  ${KUBE_AWS_CMD} validate --s3-uri ${KUBE_AWS_S3_URI}
-
-  echo Generated configuration files in ${WORK_DIR}:
-  find .
 }
 
 clean() {
@@ -228,22 +232,52 @@ nodepool_init() {
   fi
 }
 
-nodepool() {
+nodepool_render() {
   cd ${WORK_DIR}
 
-  nodepool_init
-
   ${KUBE_AWS_CMD} node-pools render stack --node-pool-name ${KUBE_AWS_POOL_NAME}
+}
+
+nodepool_up() {
+  cd ${WORK_DIR}
+
   ${KUBE_AWS_CMD} node-pools up --node-pool-name ${KUBE_AWS_POOL_NAME} --export
   ${KUBE_AWS_CMD} node-pools up --node-pool-name ${KUBE_AWS_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
 }
 
+nodepool_update() {
+  cd ${WORK_DIR}
+
+  pushd ${NODE_POOL_ASSETS_DIR}
+
+  if [ "${KUBE_AWS_SPOT_FLEET_ENABLED}" ]; then
+    SED_CMD="sed -e 's/targetCapacity: 3/targetCapacity: 5/'"
+    diff --unified cluster.yaml <(cat cluster.yaml | sh -c "${SED_CMD}") || true
+    sh -c "${SED_CMD} -i bak cluster.yaml"
+  else
+    echo 'workerCount: 2' >> cluster.yaml
+  fi
+
+  popd
+
+  ${KUBE_AWS_CMD} node-pools update --node-pool-name ${KUBE_AWS_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
+}
+
 nodepool_destroy() {
   cd ${WORK_DIR}
 
   ${KUBE_AWS_CMD} node-pools destroy --node-pool-name ${KUBE_AWS_POOL_NAME}
 }
 
+nodepool() {
+  cd ${WORK_DIR}
+
+  nodepool_init
+  nodepool_render
+  nodepool_up
+  nodepool_update
+}
+
 all() {
   build
   prepare
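The new nodepool_update step mutates the node pool's cluster.yaml before calling kube-aws node-pools update: with spot fleet enabled it bumps targetCapacity from 3 to 5 via sed, otherwise it appends workerCount: 2. Below is a self-contained sketch of that sed bump against an illustrative file rather than a real rendered node pool config; it mirrors the command shape in e2e/run, including the BSD/macOS-style sed -i suffix.

# Illustrative file; a real node pool cluster.yaml has many more fields.
cat > /tmp/cluster.yaml <<'EOF'
worker:
  spotFleet:
    targetCapacity: 3
EOF

SED_CMD="sed -e 's/targetCapacity: 3/targetCapacity: 5/'"

# Preview the change (requires bash for the <(...) process substitution);
# diff exits non-zero when the files differ, so ignore that.
diff --unified /tmp/cluster.yaml <(cat /tmp/cluster.yaml | sh -c "${SED_CMD}") || true

# Apply in place, keeping a cluster.yamlbak backup (BSD sed -i syntax).
sh -c "${SED_CMD} -i bak /tmp/cluster.yaml"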

nodepool/cluster/cluster.go (+12 -1)

@@ -48,7 +48,18 @@ func New(cfg *config.ProvidedConfig, awsDebug bool) *Cluster {
 }
 
 func (c *Cluster) stackProvisioner() *cfnstack.Provisioner {
-    return cfnstack.NewProvisioner(c.NodePoolName, c.StackTags, "{}", c.session)
+    stackPolicyBody := `{
+  "Statement" : [
+    {
+      "Effect" : "Allow",
+      "Principal" : "*",
+      "Action" : "Update:*",
+      "Resource" : "*"
+    }
+  ]
+}`
+
+    return cfnstack.NewProvisioner(c.NodePoolName, c.StackTags, stackPolicyBody, c.session)
 }
 
 func (c *Cluster) Create(stackBody string, s3URI string) error {
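With the explicit Allow "Update:*" policy in place of the previous "{}", CloudFormation no longer blocks updates to node pool stacks. One way to double-check which policy actually got attached after kube-aws node-pools up is the AWS CLI; this is a sketch, not part of the e2e script, and the stack name is a placeholder.

# Print the stack policy attached to a node pool stack; the stack name is a
# placeholder for whatever name kube-aws gave your node pool's stack.
aws cloudformation get-stack-policy \
  --stack-name my-cluster-nodepool1 \
  --query StackPolicyBody \
  --output text
# Expect the Allow "Update:*" document from the diff above; a policy with no
# Allow statement leaves every resource protected, so updates are denied.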

0 commit comments
