@@ -8,11 +8,26 @@ locals {
88module "eks" {
99 # checkov:skip=CKV_TF_1:Using registry versioned modules
1010 source = "terraform-aws-modules/eks/aws"
11- version = "19.21.0"
11+ version = "21.15.1"
1212
13- cluster_endpoint_public_access = true
13+ name = var.name
14+ kubernetes_version = var.k8s_version
15+ vpc_id = var.vpc_id
16+ subnet_ids = var.control_plane_subnets
1417
15- cluster_addons = {
18+ endpoint_public_access = true
19+
20+ authentication_mode = "API_AND_CONFIG_MAP"
21+
22+ kms_key_enable_default_policy = true
23+ kms_key_administrators = var.kms_key_administrators
24+
25+ encryption_config = {
26+ resources = ["secrets"]
27+ provider_key_arn = var.secrets_encryption_kms_key_arn
28+ }
29+
30+ addons = {
1631 coredns = {
1732 addon_version = data.aws_eks_addon_version.coredns.version
1833 timeouts = {
@@ -25,67 +40,55 @@ module "eks" {
2540 }
2641 vpc-cni = {
2742 addon_version = data.aws_eks_addon_version.vpc_cni.version
28- # service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
2943 }
3044 aws-ebs-csi-driver = {
3145 addon_version = data.aws_eks_addon_version.ebs_csi_driver.version
3246 resolve_conflicts = "OVERWRITE"
3347 }
3448 }
3549
36- # Self managed node groups will not automatically create the aws-auth configmap so we need to
37- create_aws_auth_configmap = true
38- manage_aws_auth_configmap = true
39-
40- kms_key_enable_default_policy = true
41- kms_key_administrators = var. kms_key_administrators
42-
43-
44- cluster_encryption_config = {
45- resources = [" secrets" ]
46- provider_key_arn = var.secrets_encryption_kms_key_arn
47- }
48-
49- cluster_name = var. name
50- cluster_version = var. k8s_version
51- subnet_ids = var. control_plane_subnets
52- vpc_id = var. vpc_id
53-
54- self_managed_node_group_defaults = {
55- iam_role_additional_policies = {
56- ssm = " arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
57- // AmazonEBSCSIDriverPolicy is definitely not needed by all nodes, only by csi-driver, it's here just for simplicity (EKS module doesn't support it)
58- csi = " arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
50+ access_entries = merge(
51+ { for role in var.map_roles :
52+ role.rolearn => {
53+ kubernetes_groups = role.groups
54+ principal_arn = role.rolearn
55+ }
56+ },
57+ { for user in var.map_users :
58+ user.userarn => {
59+ kubernetes_groups = user.groups
60+ principal_arn = user.userarn
61+ }
5962 }
60- }
61-
62- aws_auth_roles = var. map_roles
63- aws_auth_users = var. map_users
63+ )
6464
6565 self_managed_node_groups = {
6666 for i, v in var.worker_groups : "nodegroup${i}" => {
6767 name = v.name
6868 instance_type = v.instance_type
6969
70- iam_role_attach_cni_policy = true
71-
72- platform = " bottlerocket"
73- ami_id = data.aws_ami.bottlerocket_ami.id
70+ ami_type = "BOTTLEROCKET_x86_64"
7471
7572 min_size = v.asg_min_size
7673 max_size = v.asg_max_size
7774 desired_size = v.asg_min_size
7875
7976 subnets = v.subnets
8077
78+ iam_role_additional_policies = {
79+ ssm = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
80+ csi = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
81+ }
82+
83+ iam_role_attach_cni_policy = true
84+
8185 bootstrap_extra_args = <<-EOT
8286 [settings.host-containers.admin]
8387 enabled = false
8488
8589 [settings.host-containers.control]
8690 enabled = true
8791
88- # extra args added
8992 [settings.kernel]
9093 lockdown = "integrity"
9194
@@ -99,16 +102,14 @@ module "eks" {
99102
100103 target_group_arns = v.target_group_arns
101104
102- // see https://eu-central-1.console.aws.amazon.com/ec2/v2/home?region=eu-central-1#ImageDetails:imageId=ami-00b9b96f830a6c28b
103- root_volume_size = 2
105+ root_volume_size = 20
104106
105- // ephemeral storage
106107 additional_ebs_volumes = [
107108 {
108- block_device_name = " /dev/xvdb" ,
109- volume_size = 20 ,
110- volume_type = " gp3" ,
111- delete_on_termination = true ,
109+ block_device_name = "/dev/xvdb"
110+ volume_size = 20
111+ volume_type = "gp3"
112+ delete_on_termination = true
112113 }
113114 ]
114115
@@ -126,23 +127,6 @@ module "eks" {
126127 }
127128}
128129
129- # TODO:
130- # module "vpc_cni_irsa" {
131- # source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
132- # version = "~> 5.0"
133-
134- # role_name_prefix = "VPC-CNI-IRSA"
135- # attach_vpc_cni_policy = true
136- # #vpc_cni_enable_ipv6 = true
137-
138- # oidc_providers = {
139- # main = {
140- # provider_arn = module.eks.oidc_provider_arn
141- # namespace_service_accounts = ["kube-system:aws-node"]
142- # }
143- # }
144- # }
145-
146130resource "aws_security_group_rule" "ingress" {
147131 for_each = var.allow_ingress
148132
@@ -156,7 +140,6 @@ resource "aws_security_group_rule" "ingress" {
156140 security_group_id = module.eks.node_security_group_id
157141}
158142
159- # this needs to be configured for properly working NodePorts
160143resource "aws_security_group_rule" "eks_workers_to_eks_workers_all" {
161144 type = "ingress"
162145 description = "EKS between workers all traffic"
0 commit comments