Skip to content

Commit 6abada1

Browse files
committed
hybrid cluster auth (cm deprecated)
1 parent dd662d9 commit 6abada1

4 files changed

Lines changed: 34 additions & 42 deletions

File tree

.github/workflows/ci.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,9 @@ jobs:
4141
with:
4242
aws-region: ${{ env.AWS_REGION }}
4343
role-to-assume: ${{ secrets.AWS_OIDC_ROLE }}
44+
# TODO: this should likely include a HOT role as well
45+
# TODO: determine if GH envs preferred, set TF_VAR_
46+
- run: sed -i 's/DEPLOY_ROLE/${{ secrets.AWS_OIDC_ROLE }}/' ${{ env.VAR_FILE }}
4447
- name: TF Format
4548
id: fmt
4649
run: tofu fmt -no-color

terraform/cluster.tf

Lines changed: 19 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,11 @@
1-
provider "kubernetes" {
2-
host = aws_eks_cluster.cluster.endpoint
3-
cluster_ca_certificate = base64decode(aws_eks_cluster.cluster.certificate_authority[0].data)
4-
5-
exec {
6-
api_version = "client.authentication.k8s.io/v1beta1"
7-
command = "aws"
8-
args = ["eks", "get-token", "--cluster-name", aws_eks_cluster.cluster.name]
9-
}
10-
}
11-
121
resource "aws_eks_cluster" "cluster" {
132
name = "${local.cluster_prefix}-cluster"
143
role_arn = aws_iam_role.cluster_control_plane.arn
154
version = var.kubernetes_version
165

6+
access_config {
7+
authentication_mode = "API_AND_CONFIG_MAP"
8+
}
179
vpc_config {
1810
subnet_ids = concat(module.vpc.private_subnets, module.vpc.public_subnets)
1911
}
@@ -118,23 +110,22 @@ resource "aws_iam_role_policy_attachment" "cluster_autoscaler" {
118110
policy_arn = aws_iam_policy.cluster_autoscaler.arn
119111
}
120112

113+
resource "aws_eks_access_entry" "admin" {
114+
for_each = toset(var.cluster_admin_access_role_arns)
121115

122-
resource "kubernetes_config_map" "aws_auth" {
123-
metadata {
124-
name = "aws-auth"
125-
namespace = "kube-system"
126-
}
116+
cluster_name = aws_eks_cluster.cluster.name
117+
principal_arn = each.value
118+
kubernetes_groups = ["cluster-admin"]
119+
}
120+
121+
resource "aws_eks_access_policy_association" "admin_policy" {
122+
for_each = aws_eks_access_entry.admin
127123

128-
data = {
129-
mapRoles = yamlencode([
130-
{
131-
rolearn = aws_iam_role.nodegroup.arn
132-
username = "system:node:{{EC2PrivateDNSName}}"
133-
groups = ["system:bootstrappers", "system:nodes"]
134-
}
135-
])
136-
mapAccounts = yamlencode([])
137-
mapUsers = yamlencode(var.map_users)
124+
cluster_name = aws_eks_cluster.cluster.name
125+
policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
126+
principal_arn = each.value.principal_arn
127+
128+
access_scope {
129+
type = "cluster"
138130
}
139-
depends_on = [aws_eks_cluster.cluster]
140-
}
131+
}

terraform/variables.tf

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -148,14 +148,11 @@ variable "metrics_server_version" {
148148
EOT
149149
}
150150

151-
variable "map_users" {
152-
type = list(object({
153-
userarn = string
154-
username = string
155-
groups = list(string)
156-
}))
157-
default = []
158-
description = <<-EOT
159-
(Optional) Users to include on aws-auth ConfigMap
151+
variable "cluster_admin_access_role_arns" {
152+
type = list(string)
153+
default = []
154+
sensitive = true
155+
description = <<-EOT
156+
(Optional) Roles allowed admin access to cluster
160157
EOT
161158
}

terraform/vars/develop.tfvars

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
1-
environment = "develop"
2-
region = "us-east-1"
3-
instance_type = "t3.xlarge"
4-
bucket_names = ["pgstac-backup",]
5-
tags = {
1+
environment = "develop"
2+
region = "us-east-1"
3+
instance_type = "t3.xlarge"
4+
bucket_names = ["pgstac-backup", ]
5+
cluster_admin_access_role_arns = ["DEPLOY_ROLE", ]
6+
tags = {
67
project = "k8s-infra"
78
}

0 commit comments

Comments (0)