Skip to content

Commit e9c33f1

Browse files
committed
Add Terraform configurations for EKS pod security group setup and related resources
1 parent 4bcd269 commit e9c33f1

10 files changed

Lines changed: 416 additions & 2 deletions

File tree

KaaS/Elastic Kubernetes Service(EKS)/Terraform/github-arc/variable.tf

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
# AWS region the example is deployed into.
variable "region" {
  type    = string
  default = "us-east-1"
}

# Kubernetes control-plane version for the EKS cluster.
variable "eks_version" {
  type    = string
  default = "1.34"
}
1010

1111
variable "tags" {
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
## Testing Pod Security Group with EKS Cluster
2+
3+
### How to test
4+
- kubectl exec into one of the pods in the Deployment "my-deployment"
5+
- run `curl my-app` to test connectivity to the Service
6+
- provision and deprovision the `aws_security_group_rule.allow_dns_from_pod_sg` and `aws_security_group_rule.allow_dns_tcp_from_pod_sg` resources to see the effect on DNS resolution from a pod that uses the custom security group.
7+
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
data "aws_partition" "current" {}
data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_name
}

# Short random suffix so repeated deployments of this example don't collide.
resource "random_id" "random_string" {
  byte_length = 2
}

locals {
  # NOTE: the previous expression base64encode(random_id.random_string.b64_url)
  # double-encoded an already-base64 value, and its first three characters
  # could include '+' or '/', which are invalid in EKS cluster names.
  # The hex form is always lowercase [0-9a-f], so it is safe everywhere.
  name   = lower("${basename(path.cwd)}-${substr(random_id.random_string.hex, 0, 3)}")
  region = var.region

  vpc_cidr = "10.0.0.0/16"
  # Use the first three availability zones of the region.
  azs = slice(data.aws_availability_zones.available.names, 0, 3)

  tags = {
    project = local.name
  }
}
Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
# EKS cluster with one small managed node group and the add-ons needed for
# security groups for pods (vpc-cni with ENABLE_POD_ENI).
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "21.4.0"

  name                    = local.name
  kubernetes_version      = var.eks_version
  endpoint_public_access  = true
  endpoint_private_access = true

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  enable_irsa = true

  access_entries = {
    # Grant the caller cluster-admin via EKS access entries.
    admin = {
      kubernetes_groups = []
      principal_arn     = data.aws_caller_identity.current.arn

      policy_associations = {
        admin = {
          policy_arn = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          access_scope = {
            type = "cluster"
          }
        }
      }
    }
  }

  eks_managed_node_groups = {
    default = {
      instance_types = ["m5.large"]

      # Fixed: the key is `ami_type` (snake_case), not `amiType` — the camelCase
      # key was not a recognized node-group argument. Also, AL2 AMIs are not
      # published for Kubernetes >= 1.33, so with the default eks_version of
      # 1.34 the node group must use Amazon Linux 2023.
      ami_type     = "AL2023_x86_64_STANDARD"
      min_size     = 1
      max_size     = 1
      desired_size = 1
    }
  }

  addons = {
    coredns = {
      most_recent = true
    }
    metrics-server = {
      most_recent = true
    }
    kube-proxy = {
      most_recent = true
    }
    vpc-cni = {
      most_recent = true
      configuration_values = jsonencode({
        env = {
          # Required for security groups for pods (branch ENIs).
          ENABLE_POD_ENI                    = "true"
          NETWORK_POLICY_ENFORCING_MODE     = "strict"
        }
      })
    }
  }

  tags = var.tags
}
66+
67+
# Attach AmazonEKSVPCResourceController to the cluster role so the VPC
# resource controller can manage trunk/branch ENIs for pod security groups.
resource "aws_iam_role_policy_attachment" "eks_vpc_resource_controller" {
  # Use the detected partition (consistent with the access-entry policy ARN
  # in module.eks) so this also works in GovCloud/China partitions.
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSVPCResourceController"
  role       = module.eks.cluster_iam_role_name
}
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
# Dedicated namespace for the demo workload, named after the stack.
resource "kubernetes_namespace" "default" {
  metadata {
    name = local.name
  }
}
6+
7+
# Four nginx replicas labelled role=my-role, which the SecurityGroupPolicy
# pod selector matches to attach the custom pod security group.
resource "kubernetes_deployment" "my_deployment" {
  metadata {
    name      = "my-deployment"
    namespace = kubernetes_namespace.default.metadata[0].name
    labels = {
      app = "my-app"
    }
  }

  spec {
    replicas = 4

    selector {
      match_labels = {
        app = "my-app"
      }
    }

    template {
      metadata {
        labels = {
          app  = "my-app"
          role = "my-role"
        }
      }

      spec {
        # Give branch-ENI cleanup time before the pod is force-killed.
        termination_grace_period_seconds = 120

        container {
          name  = "nginx"
          image = "public.ecr.aws/nginx/nginx:1.23"

          port {
            container_port = 80
          }
        }
      }
    }
  }
}
48+
49+
# ClusterIP service fronting the nginx deployment; used by the README's
# `curl my-app` connectivity test.
resource "kubernetes_service" "my_app" {
  metadata {
    name      = "my-app"
    namespace = kubernetes_namespace.default.metadata[0].name
    labels = {
      app = "my-app"
    }
  }

  spec {
    selector = {
      app = "my-app"
    }

    port {
      protocol    = "TCP"
      port        = 80
      target_port = 80
    }
  }
}
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
# Ready-to-run command for pointing kubectl at the new cluster.
output "configure_kubectl" {
  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
  value       = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
}

# Surface the SG that the SecurityGroupPolicy attaches to matching pods.
output "pod_security_group_id" {
  description = "The Security Group ID assigned to pods via the SecurityGroupPolicy"
  value       = kubernetes_manifest.security_group_policy.manifest["spec"]["securityGroups"]["groupIds"][0]
}
Lines changed: 158 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,158 @@
1+
# SecurityGroupPolicy (vpcresources.k8s.aws) binding the custom security
# group to every pod in the namespace labelled role=my-role.
resource "kubernetes_manifest" "security_group_policy" {
  manifest = {
    apiVersion = "vpcresources.k8s.aws/v1beta1"
    kind       = "SecurityGroupPolicy"

    metadata = {
      name      = "my-security-group-policy"
      namespace = kubernetes_namespace.default.metadata[0].name
    }

    spec = {
      podSelector = {
        matchLabels = {
          role = "my-role"
        }
      }
      securityGroups = {
        groupIds = [
          aws_security_group.allow_all.id,
        ]
      }
    }
  }
}
23+
24+
# Security group attached to pods via the SecurityGroupPolicy.
# NOTE: despite the legacy resource/name "allow_all", the rules below are
# deliberately scoped (HTTP, DNS, kubelet probes, intra-SG traffic) — the old
# description "Allow all inbound and outbound traffic" was wrong and has been
# corrected. Changing `description` forces replacement of the SG in AWS.
resource "aws_security_group" "allow_all" {
  name        = "allow_all_pods"
  description = "Scoped pod SG: HTTP in from VPC, kubelet probes, DNS/HTTP(S) out, intra-SG traffic"
  vpc_id      = module.vpc.vpc_id

  # Inbound HTTP from anywhere inside the VPC (nodes, other pods, the Service).
  ingress {
    description = "HTTP from within VPC"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = [module.vpc.vpc_cidr_block]
  }

  # Kubelet liveness/readiness probes originate from the node's ENI.
  ingress {
    description     = "Allow kubelet probes from node SG"
    from_port       = 10250
    to_port         = 10250
    protocol        = "tcp"
    security_groups = [module.eks.node_security_group_id] # node SG id
  }

  # Pod-to-pod traffic between pods that share this security group.
  ingress {
    description = "Allow pod-to-pod communication"
    from_port   = 0
    to_port     = 65535
    protocol    = "tcp"
    self        = true
  }

  # DNS to CoreDNS pods running on nodes (UDP and TCP).
  egress {
    description     = "Allow DNS UDP to CoreDNS on nodes"
    from_port       = 53
    to_port         = 53
    protocol        = "udp"
    security_groups = [module.eks.node_security_group_id]
  }

  egress {
    description     = "Allow DNS TCP to CoreDNS on nodes"
    from_port       = 53
    to_port         = 53
    protocol        = "tcp"
    security_groups = [module.eks.node_security_group_id]
  }

  # Fallback: DNS to anything in the VPC CIDR (e.g. CoreDNS pod IPs not
  # covered by the node SG when pods use their own branch ENIs).
  egress {
    description = "Allow DNS UDP to VPC CIDR"
    from_port   = 53
    to_port     = 53
    protocol    = "udp"
    cidr_blocks = [module.vpc.vpc_cidr_block]
  }

  egress {
    description = "Allow DNS TCP to VPC CIDR"
    from_port   = 53
    to_port     = 53
    protocol    = "tcp"
    cidr_blocks = [module.vpc.vpc_cidr_block]
  }

  # Outbound HTTPS/HTTP to the internet (image pulls, package downloads).
  egress {
    description = "Allow HTTPS outbound"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    description = "Allow HTTP outbound"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Outbound pod-to-pod traffic between pods that share this security group.
  egress {
    description = "Allow pod-to-pod communication"
    from_port   = 0
    to_port     = 65535
    protocol    = "tcp"
    self        = true
  }

  # API server access via the cluster security group.
  egress {
    description     = "Allow communication to EKS cluster"
    from_port       = 443
    to_port         = 443
    protocol        = "tcp"
    security_groups = [module.eks.cluster_security_group_id]
  }

  # Broad outbound TCP to nodes for service discovery / NodePort-style paths.
  egress {
    description     = "Allow communication to nodes"
    from_port       = 0
    to_port         = 65535
    protocol        = "tcp"
    security_groups = [module.eks.node_security_group_id]
  }

  tags = var.tags
}
138+
139+
# Node-SG side of the DNS path: without this rule, CoreDNS (running behind
# the node security group) rejects UDP/53 queries coming from the pod SG.
resource "aws_security_group_rule" "allow_dns_from_pod_sg" {
  type                     = "ingress"
  from_port                = 53
  to_port                  = 53
  protocol                 = "udp"
  source_security_group_id = aws_security_group.allow_all.id
  security_group_id        = module.eks.node_security_group_id
  description              = "Allow DNS UDP from pod security group"
}
149+
150+
# TCP/53 counterpart for large DNS responses and zone transfers.
resource "aws_security_group_rule" "allow_dns_tcp_from_pod_sg" {
  type                     = "ingress"
  from_port                = 53
  to_port                  = 53
  protocol                 = "tcp"
  source_security_group_id = aws_security_group.allow_all.id
  security_group_id        = module.eks.node_security_group_id
  description              = "Allow DNS TCP from pod security group"
}

0 commit comments

Comments
 (0)