eks.tf
#---------------------------------------------------------------
# EKS Cluster
#---------------------------------------------------------------
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 19.15"
cluster_name = local.name
cluster_version = var.eks_cluster_version
#WARNING: Avoid using this option (cluster_endpoint_public_access = true) in preprod or prod accounts. This feature is designed for sandbox accounts, simplifying cluster deployment and testing.
cluster_endpoint_public_access = true # if true, Your cluster API server is accessible from the internet. You can, optionally, limit the CIDR blocks that can access the public endpoint.
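
  # A minimal, commented-out sketch of a more restrictive setup for non-sandbox accounts,
  # assuming the cluster_endpoint_public_access* inputs of this module version.
  # The CIDR below is a hypothetical placeholder, not part of the original example:
  # cluster_endpoint_public_access_cidrs = ["203.0.113.0/24"] # limit the public endpoint to known CIDRs
  # cluster_endpoint_public_access       = false              # or disable the public endpoint entirely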

  vpc_id = module.vpc.vpc_id

  # Subnet IDs where the EKS Control Plane ENIs will be created; filtered to only the secondary-CIDR private subnets (those whose CIDR starts with "100.")
  subnet_ids = compact([for subnet_id, cidr_block in zipmap(module.vpc.private_subnets, module.vpc.private_subnets_cidr_blocks) : substr(cidr_block, 0, 4) == "100." ? subnet_id : null])

  manage_aws_auth_configmap = true
  aws_auth_roles = [
    # We need to add in the Karpenter node IAM role for nodes launched by Karpenter
    {
      rolearn  = module.eks_blueprints_addons.karpenter.node_iam_role_arn
      username = "system:node:{{EC2PrivateDNSName}}"
      groups = [
        "system:bootstrappers",
        "system:nodes",
      ]
    }
  ]

  #---------------------------------------
  # Note: These rules can be restricted further to only the ports required by each add-on and your application
  #---------------------------------------
  # Extend cluster security group rules
  cluster_security_group_additional_rules = {
    ingress_nodes_ephemeral_ports_tcp = {
      description                = "Nodes on ephemeral ports"
      protocol                   = "tcp"
      from_port                  = 1025
      to_port                    = 65535
      type                       = "ingress"
      source_node_security_group = true
    }
  }

  # Extend node-to-node security group rules
  node_security_group_additional_rules = {
    ingress_self_all = {
      description = "Node to node all ports/protocols"
      protocol    = "-1"
      from_port   = 0
      to_port     = 0
      type        = "ingress"
      self        = true
    }
    # Allows the control plane to talk to worker nodes on all ports. This is added to simplify the example and to avoid communication issues between add-ons and the control plane.
    # It can be restricted to the specific port each add-on requires, e.g., metrics-server 4443, spark-operator 8080, karpenter 8443, etc. (see the commented-out example below).
    # Change this according to your security requirements if needed.
    ingress_cluster_to_node_all_traffic = {
      description                   = "Cluster API to Nodegroup all traffic"
      protocol                      = "-1"
      from_port                     = 0
      to_port                       = 0
      type                          = "ingress"
      source_cluster_security_group = true
    }
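    # Illustrative, commented-out sketch of a tighter rule scoped to a single add-on port
    # (metrics-server on 4443, as mentioned above); the rule name is an assumption for the example only:
    # ingress_cluster_to_node_metrics_server = {
    #   description                   = "Cluster API to metrics-server"
    #   protocol                      = "tcp"
    #   from_port                     = 4443
    #   to_port                       = 4443
    #   type                          = "ingress"
    #   source_cluster_security_group = true
    # }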
  }

  eks_managed_node_group_defaults = {
    iam_role_additional_policies = {
      # Not required, but used in the example to access the nodes to inspect mounted volumes
      AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
    }
  }

  eks_managed_node_groups = {
    # We recommend having a managed node group (MNG) to place your critical workloads and add-ons,
    # and then relying on Karpenter to scale the rest of your workloads.
    # You can also make use of nodeSelector and taints/tolerations to spread workloads across the MNG and Karpenter provisioners
    # (an illustrative, commented-out taints block is shown further down in this node group).
    core_node_group = {
      name        = "core-node-group"
      description = "EKS Core node group for hosting critical add-ons"
      # Subnet IDs where the nodes/node groups will be provisioned; filtered to only the secondary-CIDR private subnets (those whose CIDR starts with "100.")
      subnet_ids = compact([for subnet_id, cidr_block in zipmap(module.vpc.private_subnets, module.vpc.private_subnets_cidr_blocks) : substr(cidr_block, 0, 4) == "100." ? subnet_id : null])

      min_size     = 2
      max_size     = 6
      desired_size = 2

      instance_types = ["r5.xlarge"]
      ebs_optimized  = true
      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size = 100
            volume_type = "gp3"
          }
        }
      }

      labels = {
        WorkerType    = "ON_DEMAND"
        NodeGroupType = "core"
      }
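
      # Illustrative, commented-out sketch of taints that could keep general workloads off this
      # core node group; the key/value shown are assumptions, not part of the original example:
      # taints = {
      #   critical_addons_only = {
      #     key    = "CriticalAddonsOnly"
      #     value  = "true"
      #     effect = "NO_SCHEDULE"
      #   }
      # }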

      tags = merge(local.tags, {
        Name                     = "core-node-grp",
        "karpenter.sh/discovery" = local.name
      })
    }
  }
}