
Commit 5066a5e

Merge pull request #610 from mumoshu/fix-efs-per-node-pool
Fix elasticFileSystemId only on node pools
2 parents: 6a7bc0e + ab95e18

4 files changed: 83 additions and 8 deletions


core/controlplane/config/templates/cluster.yaml

Lines changed: 24 additions & 5 deletions
@@ -455,6 +455,15 @@ worker:
 # clusterAutoscalerSupport:
 #   enabled: true
 #
+# # Mount an EFS only to a specific node pool.
+# # This is a NFS share that will be available across a node pool through a hostPath volume on the "/efs" mountpoint
+# #
+# # If you'd like to configure an EFS mounted to every node, use the top-level `elasticFileSystemId` instead
+# #
+# # Beware that currently it doesn't work for a node pool in subnets managed by kube-aws.
+# # In other words, this node pool must be in existing subnets managed by you by specifying subnets[].id to make this work.
+# elasticFileSystemId: fs-47a2c22e
+#
 # # This option has not yet been tested with rkt as container runtime
 # ephemeralImageStorage:
 #   enabled: true
@@ -1022,16 +1031,26 @@ worker:
 # Use Calico for network policy.
 # useCalico: false
 
-# Create MountTargets for a pre-existing Elastic File System (Amazon EFS). Enter the resource id, eg "fs-47a2c22e"
+# Create MountTargets to subnets managed by kube-aws for a pre-existing Elastic File System (Amazon EFS),
+# and then mount to every node.
+#
+# This is for mounting an EFS to every node.
+# If you'd like to mount an EFS only to a specific node pool, set `worker.nodePools[].elasticFileSystemId` instead.
+#
+# Enter the resource id, eg "fs-47a2c22e"
 # This is a NFS share that will be available across the entire cluster through a hostPath volume on the "/efs" mountpoint
 #
 # You can create a new EFS volume using the CLI:
 # $ aws efs create-file-system --creation-token $(uuidgen)
 #
-# Beware that an EFS file system can not be mounted from mutiple subnets from the same availability zone and
-# therefore this feature won't work like you might have expected when you're deploying your cluster to an existing VPC and
-# wanted to mount an existing EFS to the subnet(s) created by kube-aws
-# See https://github.com/kubernetes-incubator/kube-aws/issues/208 for more information
+# Beware that:
+# * an EFS file system can not be mounted from multiple subnets from the same availability zone and
+#   therefore this feature won't work like you might have expected when you're deploying your cluster to an existing VPC and
+#   wanted to mount an existing EFS to the subnet(s) created by kube-aws
+#   See https://github.com/kubernetes-incubator/kube-aws/issues/208 for more information
+# * kube-aws doesn't create MountTargets for existing subnets(=NOT managed by kube-aws). If you'd like to bring your own subnets
+#   to be used by kube-aws, it is now your responsibility to configure the subnets, including creating MountTargets, accordingly
+#   to your requirements
 #elasticFileSystemId: fs-47a2c22e
 
 # Create shared persistent volume

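To make the new per-node-pool option concrete, a hedged cluster.yaml sketch assembled from the comments above; the subnet id, availability zone, and pool names mirror the integration test added later in this commit, and the file system id is the documentation's example value, not a real resource:

subnets:
- name: existing1
  # A pre-existing subnet managed by you, referenced by id; creating its EFS MountTargets is your responsibility.
  id: subnet-12345
  availabilityZone: us-west-1a

worker:
  nodePools:
  - name: pool1
    subnets:
    - name: existing1
    # Mounted via a hostPath volume on "/efs", but only on nodes in pool1.
    elasticFileSystemId: fs-47a2c22e
  - name: pool2
    # pool2 sets no elasticFileSystemId, so it gets no EFS mount.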
core/nodepool/config/config.go

Lines changed: 9 additions & 0 deletions
@@ -238,6 +238,15 @@ define one or more public subnets in cluster.yaml or explicitly reference privat
 		}
 	}
 
+	anySubnetIsManaged := false
+	for _, s := range c.Subnets {
+		anySubnetIsManaged = anySubnetIsManaged || s.ManageSubnet()
+	}
+
+	if anySubnetIsManaged && main.ElasticFileSystemID == "" && c.ElasticFileSystemID != "" {
+		return fmt.Errorf("elasticFileSystemId cannot be specified for a node pool in managed subnet(s), but was: %s", c.ElasticFileSystemID)
+	}
+
 	c.EtcdNodes = main.EtcdNodes
 	c.KubeResourcesAutosave = main.KubeResourcesAutosave

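In cluster.yaml terms, this check rejects a node pool that sets elasticFileSystemId while running in kube-aws-managed subnets (subnets defined by instanceCIDR rather than an existing id) when no cluster-wide elasticFileSystemId is set. A rough sketch of the rejected shape, mirroring the failing integration test added below; all names and ids are placeholders taken from that test:

subnets:
- name: managed1
  # No id, so kube-aws creates and manages this subnet.
  availabilityZone: us-west-1a
  instanceCIDR: 10.0.1.0/24

worker:
  nodePools:
  - name: pool1
    subnets:
    - name: managed1
    # Rejected with: "elasticFileSystemId cannot be specified for a node pool in managed subnet(s), but was: efs-12345"
    elasticFileSystemId: efs-12345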
core/nodepool/config/deployment.go

Lines changed: 7 additions & 3 deletions
@@ -116,9 +116,13 @@ func (c DeploymentSettings) WithDefaultsFrom(main cfg.DeploymentSettings) Deploy
 	c.Region = main.Region
 	c.ContainerRuntime = main.ContainerRuntime
 	c.KMSKeyARN = main.KMSKeyARN
-	// TODO Allow providing (1) one or more elasticFileSystemId's to be mounted (2) either cluster-wide or per node pool.
-	// It currently supports only one elasticFileSystemId to be mounted cluster-wide
-	c.ElasticFileSystemID = main.ElasticFileSystemID
+
+	// TODO Allow providing one or more elasticFileSystemId's to be mounted both per-node-pool/cluster-wide
+	// TODO Allow providing elasticFileSystemId to a node pool in managed subnets.
+	// Currently, per-node-pool elasticFileSystemId requires existing subnets configured by users to have appropriate MountTargets associated
+	if c.ElasticFileSystemID == "" {
+		c.ElasticFileSystemID = main.ElasticFileSystemID
+	}
 
 	return c
 }

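The defaulting above implies a simple precedence rule: a node pool that leaves elasticFileSystemId empty inherits the cluster-wide value, while a pool that sets its own keeps it. A hedged sketch of that combination; the second file system id is invented for illustration, and existing1 is assumed to be a pre-existing subnet with MountTargets managed by you, as in the earlier example:

# Cluster-wide default, inherited by every node pool that doesn't set its own.
elasticFileSystemId: fs-47a2c22e

worker:
  nodePools:
  - name: pool1
    subnets:
    - name: existing1
    # Overrides the cluster-wide value on pool1's nodes.
    elasticFileSystemId: fs-0123abcd
  - name: pool2
    # No elasticFileSystemId here, so pool2 falls back to fs-47a2c22e.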
test/integration/maincluster_test.go

Lines changed: 43 additions & 0 deletions
@@ -521,6 +521,32 @@ worker:
 				},
 			},
 		},
+		{
+			context: "WithElasticFileSystemIdInSpecificNodePool",
+			configYaml: mainClusterYaml + `
+subnets:
+- name: existing1
+  id: subnet-12345
+  availabilityZone: us-west-1a
+worker:
+  nodePools:
+  - name: pool1
+    subnets:
+    - name: existing1
+    elasticFileSystemId: efs-12345
+  - name: pool2
+`,
+			assertConfig: []ConfigTester{
+				func(c *config.Config, t *testing.T) {
+					if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
+						t.Errorf("Unexpected worker.nodePools[0].elasticFileSystemId: %s", c.NodePools[0].ElasticFileSystemID)
+					}
+					if c.NodePools[1].ElasticFileSystemID != "" {
+						t.Errorf("Unexpected worker.nodePools[1].elasticFileSystemId: %s", c.NodePools[1].ElasticFileSystemID)
+					}
+				},
+			},
+		},
 		{
 			context: "WithEtcdDataVolumeEncrypted",
 			configYaml: minimalValidConfigYaml + `
@@ -3310,6 +3336,23 @@ controller:
 			configYaml: kubeAwsSettings.withClusterName("my.cluster").minimumValidClusterYaml(),
 			expectedErrorMessage: "clusterName(=my.cluster) is malformed. It must consist only of alphanumeric characters, colons, or hyphens",
 		},
+		{
+			context: "WithElasticFileSystemIdInSpecificNodePoolWithManagedSubnets",
+			configYaml: mainClusterYaml + `
+subnets:
+- name: managed1
+  availabilityZone: us-west-1a
+  instanceCIDR: 10.0.1.0/24
+worker:
+  nodePools:
+  - name: pool1
+    subnets:
+    - name: managed1
+    elasticFileSystemId: efs-12345
+  - name: pool2
+`,
+			expectedErrorMessage: "invalid node pool at index 0: elasticFileSystemId cannot be specified for a node pool in managed subnet(s), but was: efs-12345",
+		},
 		{
 			context: "WithEtcdAutomatedDisasterRecoveryRequiresAutomatedSnapshot",
 			configYaml: minimalValidConfigYaml + `
