Skip to content
This repository was archived by the owner on Sep 30, 2020. It is now read-only.

Commit 711fb97

Browse files
authored
Merge pull request #165 from mumoshu/fix-cfn-init-and-signal-for-node-pools
Fix consistent failures in cfn-signal.service and set-aws-environment.service
2 parents 9e618c9 + eb77ee8 commit 711fb97

File tree

4 files changed

+37
-15
lines changed

4 files changed

+37
-15
lines changed

config/config.go

+6
Original file line numberDiff line numberDiff line change
@@ -683,6 +683,12 @@ type Config struct {
683683
VPCRef string
684684
}
685685

686+
// CloudFormation stack name which is unique in an AWS account.
687+
// This is intended to be used to reference stack name from cloud-config as the target of awscli or cfn-bootstrap-tools commands e.g. `cfn-init` and `cfn-signal`
688+
func (c Config) StackName() string {
689+
return c.ClusterName
690+
}
691+
686692
func (c Cluster) valid() error {
687693
if c.CreateRecordSet {
688694
if c.HostedZone == "" && c.HostedZoneID == "" {

config/templates/cloud-config-worker

+2-2
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,7 @@ coreos:
209209
{{.AWSCliImageRepo}}:{{.AWSCliTag}} -- cfn-init -v \
210210
--region {{.Region}} \
211211
--resource LaunchConfigurationWorker \
212-
--stack {{.ClusterName}}
212+
--stack {{.StackName}}
213213
{{end}}
214214

215215
{{if .Worker.SpotFleet.Enabled}}
@@ -290,7 +290,7 @@ coreos:
290290
{{.AWSCliImageRepo}}:{{.AWSCliTag}} -- cfn-signal -e 0 \
291291
--region {{.Region}} \
292292
--resource AutoScaleWorker \
293-
--stack {{.ClusterName}}
293+
--stack {{.StackName}}
294294
{{end}}
295295

296296
{{if .Experimental.AwsNodeLabels.Enabled }}

e2e/run

+23-13
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,9 @@
33
KUBE_AWS_CMD=${KUBE_AWS_CMD:-$GOPATH/src/github.com/coreos/kube-aws/bin/kube-aws}
44
E2E_DIR=$(cd $(dirname $0); pwd)
55
WORK_DIR=${E2E_DIR}/assets/${KUBE_AWS_CLUSTER_NAME}
6-
NODE_POOL_ASSETS_DIR=${E2E_DIR}/assets/${KUBE_AWS_CLUSTER_NAME}/node-pools/${KUBE_AWS_POOL_NAME}
6+
KUBE_AWS_NODE_POOL_INDEX=${KUBE_AWS_NODE_POOL_INDEX:-1}
7+
KUBE_AWS_NODE_POOL_NAME=${KUBE_AWS_CLUSTER_NAME}-nodepool${KUBE_AWS_NODE_POOL_INDEX}
8+
NODE_POOL_ASSETS_DIR=${E2E_DIR}/assets/${KUBE_AWS_CLUSTER_NAME}/node-pools/${KUBE_AWS_NODE_POOL_NAME}
79
SRC_DIR=$(cd $(dirname $0); cd ..; pwd)
810
KUBECONFIG=${WORK_DIR}/kubeconfig
911

@@ -71,6 +73,13 @@ configure() {
7173
echo 'createRecordSet: true' >> cluster.yaml
7274

7375
# required to run kube-aws update
76+
echo 'workerCount: 2' >> cluster.yaml
77+
echo 'controllerCount: 2' >> cluster.yaml
78+
79+
if [ "${KUBE_AWS_USE_CALICO}" != "" ]; then
80+
echo 'useCalico: true' >> cluster.yaml
81+
fi
82+
7483
customize_worker
7584

7685
${KUBE_AWS_CMD} render
@@ -82,8 +91,6 @@ configure() {
8291
}
8392

8493
customize_worker() {
85-
echo 'workerCount: 2' >> cluster.yaml
86-
echo 'controllerCount: 2' >> cluster.yaml
8794
echo -e 'experimental:\n nodeDrainer:\n enabled: true' >> cluster.yaml
8895
if [ "${KUBE_AWS_WAIT_SIGNAL_ENABLED}" != "" ]; then
8996
echo -e ' waitSignal:\n enabled: true' >> cluster.yaml
@@ -97,9 +104,6 @@ customize_worker() {
97104
if [ "${KUBE_AWS_AWS_ENV_ENABLED}" != "" ]; then
98105
echo -e " awsEnvironment:\n enabled: true\n environment:\n CFNSTACK: '{\"Ref\":\"AWS::StackId\"}'" >> cluster.yaml
99106
fi
100-
if [ "${KUBE_AWS_USE_CALICO}" != "" ]; then
101-
echo 'useCalico: true' >> cluster.yaml
102-
fi
103107
}
104108

105109
clean() {
@@ -224,34 +228,40 @@ ssh_worker() {
224228
nodepool_init() {
225229
cd ${WORK_DIR}
226230

227-
${KUBE_AWS_CMD} node-pools init --node-pool-name ${KUBE_AWS_POOL_NAME} \
231+
${KUBE_AWS_CMD} node-pools init --node-pool-name ${KUBE_AWS_NODE_POOL_NAME} \
228232
--availability-zone ${KUBE_AWS_AVAILABILITY_ZONE} \
229233
--key-name ${KUBE_AWS_KEY_NAME} \
230234
--kms-key-arn ${KUBE_AWS_KMS_KEY_ARN}
231235

236+
cd ${NODE_POOL_ASSETS_DIR}
237+
232238
if [ "${KUBE_AWS_SPOT_FLEET_ENABLED}" != "" ]; then
233239
echo Writing ${NODE_POOL_ASSETS_DIR}/cluster.yaml
234240
echo -e "worker:\n spotFleet:\n targetCapacity: 3\n" >> ${NODE_POOL_ASSETS_DIR}/cluster.yaml
235241
fi
242+
243+
echo -e "instanceCIDR: 10.0.${KUBE_AWS_NODE_POOL_INDEX}.0/24" >> ${NODE_POOL_ASSETS_DIR}/cluster.yaml
244+
245+
customize_worker
236246
}
237247

238248
nodepool_render() {
239249
cd ${WORK_DIR}
240250

241-
${KUBE_AWS_CMD} node-pools render stack --node-pool-name ${KUBE_AWS_POOL_NAME}
251+
${KUBE_AWS_CMD} node-pools render stack --node-pool-name ${KUBE_AWS_NODE_POOL_NAME}
242252
}
243253

244254
nodepool_validate() {
245255
cd ${WORK_DIR}
246256

247-
${KUBE_AWS_CMD} node-pools validate --node-pool-name ${KUBE_AWS_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
257+
${KUBE_AWS_CMD} node-pools validate --node-pool-name ${KUBE_AWS_NODE_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
248258
}
249259

250260
nodepool_up() {
251261
cd ${WORK_DIR}
252262

253-
${KUBE_AWS_CMD} node-pools up --node-pool-name ${KUBE_AWS_POOL_NAME} --export
254-
${KUBE_AWS_CMD} node-pools up --node-pool-name ${KUBE_AWS_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
263+
${KUBE_AWS_CMD} node-pools up --node-pool-name ${KUBE_AWS_NODE_POOL_NAME} --export
264+
${KUBE_AWS_CMD} node-pools up --node-pool-name ${KUBE_AWS_NODE_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
255265
}
256266

257267
nodepool_update() {
@@ -269,13 +279,13 @@ nodepool_update() {
269279

270280
popd
271281

272-
${KUBE_AWS_CMD} node-pools update --node-pool-name ${KUBE_AWS_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
282+
${KUBE_AWS_CMD} node-pools update --node-pool-name ${KUBE_AWS_NODE_POOL_NAME} --s3-uri ${KUBE_AWS_S3_URI}
273283
}
274284

275285
nodepool_destroy() {
276286
cd ${WORK_DIR}
277287

278-
${KUBE_AWS_CMD} node-pools destroy --node-pool-name ${KUBE_AWS_POOL_NAME}
288+
${KUBE_AWS_CMD} node-pools destroy --node-pool-name ${KUBE_AWS_NODE_POOL_NAME}
279289
}
280290

281291
nodepool() {

nodepool/config/config.go

+6
Original file line numberDiff line numberDiff line change
@@ -214,6 +214,12 @@ func (c ProvidedConfig) valid() error {
214214
return nil
215215
}
216216

217+
// CloudFormation stack name which is unique in an AWS account.
218+
// This is intended to be used to reference stack name from cloud-config as the target of awscli or cfn-bootstrap-tools commands e.g. `cfn-init` and `cfn-signal`
219+
func (c ComputedConfig) StackName() string {
220+
return c.NodePoolName
221+
}
222+
217223
func (c ComputedConfig) VPCRef() string {
218224
//This means this VPC already exists, and we can reference it directly by ID
219225
if c.VPCID != "" {

0 commit comments

Comments (0)