# Kubernetes provider wired to the EKS cluster managed in this configuration.
# Authentication uses a short-lived token fetched via `aws eks get-token`
# (exec plugin) rather than a static kubeconfig, so credentials never persist.
provider "kubernetes" {
  host                   = aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(aws_eks_cluster.cluster.certificate_authority[0].data)

  exec {
    # v1beta1 is the exec credential API version expected by recent AWS CLI releases.
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", aws_eks_cluster.cluster.name]
  }
}
11-
121resource "aws_eks_cluster" "cluster" {
132 name = " ${ local . cluster_prefix } -cluster"
143 role_arn = aws_iam_role. cluster_control_plane . arn
154 version = var. kubernetes_version
165
6+ access_config {
7+ authentication_mode = " API_AND_CONFIG_MAP"
8+ }
179 vpc_config {
1810 subnet_ids = concat (module. vpc . private_subnets , module. vpc . public_subnets )
1911 }
@@ -118,23 +110,22 @@ resource "aws_iam_role_policy_attachment" "cluster_autoscaler" {
118110 policy_arn = aws_iam_policy. cluster_autoscaler . arn
119111}
120112
# One EKS access entry per admin IAM role ARN. This is the modern replacement
# for mapping roles through the legacy aws-auth ConfigMap and requires the
# cluster's access_config authentication_mode to include "API".
resource "aws_eks_access_entry" "admin" {
  for_each = toset(var.cluster_admin_access_role_arns)

  cluster_name      = aws_eks_cluster.cluster.name
  principal_arn     = each.value
  # Group membership inside the cluster; RBAC bindings for "cluster-admin"
  # are granted via the access policy association defined alongside this entry.
  kubernetes_groups = ["cluster-admin"]
}
120+
# Attach the AWS-managed cluster-admin access policy to every access entry
# created above, scoped to the whole cluster. Iterating over the access-entry
# resource (rather than the raw variable) guarantees the entry exists before
# the policy is associated.
resource "aws_eks_access_policy_association" "admin_policy" {
  for_each = aws_eks_access_entry.admin

  cluster_name  = aws_eks_cluster.cluster.name
  # AWS-managed policy granting full Kubernetes admin on the cluster.
  policy_arn    = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
  principal_arn = each.value.principal_arn

  access_scope {
    type = "cluster"
  }
}