Commit 2f87c47

Implement AWS EKS Tailscale Operator setup with Terraform
1 parent 361a28c commit 2f87c47

File tree

5 files changed, +476 -0 lines changed
Lines changed: 79 additions & 0 deletions

# aws-eks-tailscale-operator

This example creates the following:

- a VPC and related resources including a NAT Gateway
- an EKS cluster with a managed node group
- a Kubernetes namespace for the Tailscale operator with privileged pod security enforcement
- the Tailscale Kubernetes Operator deployed via Helm
- necessary IAM roles and security groups for EKS and Tailscale connectivity

## Considerations

- The EKS cluster is configured with both public and private API server access for flexibility
- The Tailscale operator is deployed in a dedicated `tailscale` namespace with privileged pod security
- OAuth credentials are passed to the Helm chart as sensitive values and stored in the cluster as a Kubernetes secret
- The operator will create a Tailscale device for API server proxy access
- Any additional Tailscale resources (like ingress proxies) created by the operator will appear in your tailnet

## Prerequisites

- Create a [Tailscale OAuth client](https://tailscale.com/kb/1215/oauth-clients#setting-up-an-oauth-client) with appropriate scopes
- Ensure the AWS CLI is configured with appropriate permissions for EKS
- Install `kubectl` for cluster access after deployment

## To use

Follow the documentation to configure the Terraform providers (a rough sketch follows this list):

- [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest/docs)
- [Kubernetes](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs)
- [Helm](https://registry.terraform.io/providers/hashicorp/helm/latest/docs)

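The actual provider wiring lives in this commit's other files (not shown here). As a minimal sketch, pointing the Kubernetes and Helm providers at the cluster this example creates might look like the following, using the `aws_eks_cluster_auth` data source for short-lived credentials:

```hcl
data "aws_eks_cluster_auth" "main" {
  name = aws_eks_cluster.main.name
}

provider "kubernetes" {
  host                   = aws_eks_cluster.main.endpoint
  cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.main.token
}

provider "helm" {
  kubernetes {
    host                   = aws_eks_cluster.main.endpoint
    cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.main.token
  }
}
```
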
### Configure variables

Create a `terraform.tfvars` file with your Tailscale OAuth credentials:

```hcl
tailscale_oauth_client_id     = "your-oauth-client-id"
tailscale_oauth_client_secret = "your-oauth-client-secret"
```
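
The matching variable declarations (defined in one of this commit's files not shown here) would look something like this sketch:

```hcl
variable "tailscale_oauth_client_id" {
  description = "OAuth client ID for the Tailscale Kubernetes operator"
  type        = string
  sensitive   = true
}

variable "tailscale_oauth_client_secret" {
  description = "OAuth client secret for the Tailscale Kubernetes operator"
  type        = string
  sensitive   = true
}
```
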
### Deploy

```shell
terraform init
terraform apply
```

After deployment, configure kubectl to access your cluster:

```shell
aws eks update-kubeconfig --region us-east-1 --name $(terraform output -raw cluster_name)
```
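
To confirm kubectl can reach the cluster, list the worker nodes:

```shell
kubectl get nodes
```
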
### Verify deployment

Check that the Tailscale operator is running:

```shell
kubectl get pods -n tailscale
kubectl logs -n tailscale -l app.kubernetes.io/name=tailscale-operator
```
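
Once the operator is running, it should also appear as a device in your tailnet under the `hostname` configured in the Helm values.
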
## To destroy

```shell
terraform destroy
```

## Customization

You can customize the EKS cluster configuration by modifying the locals in `main.tf` (a sketch follows below):

- `cluster_version`: EKS Kubernetes version
- `node_instance_type`: EC2 instance type for worker nodes
- `desired_size`, `max_size`, `min_size`: node group scaling configuration
- VPC CIDR blocks and subnet configurations

The Tailscale operator configuration can be customized in the `helm_release` resource values.
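
For example, a minimal sketch of adjusted locals (the values here are illustrative, not recommendations):

```hcl
locals {
  # ...name, tags, and VPC wiring unchanged...

  cluster_version    = "1.30"     # EKS Kubernetes version
  node_instance_type = "t3.large" # larger worker nodes
  desired_size       = 3
  max_size           = 6
  min_size           = 2
}
```
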
Lines changed: 286 additions & 0 deletions

locals {
  name = "example-${basename(path.cwd)}"

  aws_tags = {
    Name = local.name
  }

  // Modify these to use your own VPC.
  vpc_cidr_block     = module.vpc.vpc_cidr_block
  vpc_id             = module.vpc.vpc_id
  subnet_ids         = module.vpc.private_subnets
  security_group_ids = [aws_security_group.tailscale.id]

  # EKS cluster configuration
  cluster_version    = "1.30"
  node_instance_type = "t3.medium"
  node_capacity_type = "ON_DEMAND"
  node_ami_type      = "AL2_x86_64"
  desired_size       = 2
  max_size           = 4
  min_size           = 1

  # Tailscale configuration
  tailscale_oauth_client_id     = var.tailscale_oauth_client_id
  tailscale_oauth_client_secret = var.tailscale_oauth_client_secret
}

// Remove this to use your own VPC.
module "vpc" {
  source = "../internal-modules/aws-vpc"

  name = local.name
  tags = local.aws_tags

  cidr = "10.0.0.0/16"

  public_subnets  = ["10.0.1.0/24", "10.0.2.0/24"]
  private_subnets = ["10.0.10.0/24", "10.0.20.0/24"]
}

# EKS Cluster
resource "aws_eks_cluster" "main" {
  name     = local.name
  role_arn = aws_iam_role.cluster.arn
  version  = local.cluster_version

  vpc_config {
    subnet_ids              = local.subnet_ids
    endpoint_private_access = true
    endpoint_public_access  = true
    public_access_cidrs     = ["0.0.0.0/0"]
    security_group_ids      = [aws_security_group.cluster.id]
  }

  # Send control plane logs to the CloudWatch log group below
  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]

  tags = local.aws_tags

  depends_on = [
    aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
    aws_cloudwatch_log_group.eks_cluster,
    # depends_on only accepts whole-object references, so depend on the module
    # itself (which also waits for its NAT gateway); remove if using your own VPC.
    module.vpc,
  ]
}

# EKS Node Group
resource "aws_eks_node_group" "main" {
  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "${local.name}-nodes"
  node_role_arn   = aws_iam_role.node_group.arn
  subnet_ids      = local.subnet_ids

  capacity_type  = local.node_capacity_type
  ami_type       = local.node_ami_type
  instance_types = [local.node_instance_type]

  scaling_config {
    desired_size = local.desired_size
    max_size     = local.max_size
    min_size     = local.min_size
  }

  update_config {
    max_unavailable = 1
  }

  tags = local.aws_tags

  depends_on = [
    aws_iam_role_policy_attachment.node_group_AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.node_group_AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.node_group_AmazonEC2ContainerRegistryReadOnly,
  ]
}

# CloudWatch Log Group for EKS cluster control plane logs
resource "aws_cloudwatch_log_group" "eks_cluster" {
  name              = "/aws/eks/${local.name}/cluster"
  retention_in_days = 7
  tags              = local.aws_tags
}

# Kubernetes namespace for the Tailscale operator. The operator's pods require
# privileged pod security, so enforcement is relaxed for this namespace.
resource "kubernetes_namespace" "tailscale_operator" {
  metadata {
    name = "tailscale"
    labels = {
      "pod-security.kubernetes.io/enforce" = "privileged"
    }
  }

  depends_on = [aws_eks_node_group.main]
}

# Deploy Tailscale Operator using Helm
resource "helm_release" "tailscale_operator" {
  name       = "tailscale-operator"
  repository = "https://pkgs.tailscale.com/helmcharts"
  chart      = "tailscale-operator"
  version    = "1.84.0"
  namespace  = kubernetes_namespace.tailscale_operator.metadata[0].name

  values = [
    yamlencode({
      operatorConfig = {
        image = {
          repo = "tailscale/k8s-operator"
          tag  = "v1.84.0"
        }
        hostname    = "${local.name}-operator"
        defaultTags = ["tag:k8s-operator"]
      }
      apiServerProxyConfig = {
        mode = "true"
        tags = "tag:k8s-operator,tag:k8s-api-server"
      }
    })
  ]

  # Pass the OAuth credentials via set_sensitive so Terraform marks them
  # sensitive instead of embedding them in plain text in the rendered values.
  set_sensitive {
    name  = "oauth.clientId"
    value = local.tailscale_oauth_client_id
  }

  set_sensitive {
    name  = "oauth.clientSecret"
    value = local.tailscale_oauth_client_secret
  }

  depends_on = [
    kubernetes_namespace.tailscale_operator,
    aws_eks_node_group.main,
  ]
}
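
# Illustrative sketch (not part of this commit): once the operator is running,
# a Kubernetes Service can be exposed on the tailnet by giving it the
# "tailscale" loadBalancerClass, e.g.:
#
# resource "kubernetes_service" "example" {
#   metadata {
#     name = "example"
#   }
#   spec {
#     selector = {
#       app = "example"
#     }
#     type                = "LoadBalancer"
#     load_balancer_class = "tailscale"
#     port {
#       port        = 80
#       target_port = 8080
#     }
#   }
# }
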
# Security group for EKS cluster
resource "aws_security_group" "cluster" {
  name_prefix = "${local.name}-cluster-"
  vpc_id      = local.vpc_id

  tags = merge(
    local.aws_tags,
    {
      Name = "${local.name}-cluster"
    }
  )
}

resource "aws_security_group_rule" "cluster_egress" {
  security_group_id = aws_security_group.cluster.id
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
}

# Security group for Tailscale traffic
resource "aws_security_group" "tailscale" {
  vpc_id = local.vpc_id
  name   = "${local.name}-tailscale"

  tags = merge(
    local.aws_tags,
    {
      Name = "${local.name}-tailscale"
    }
  )
}

# UDP 41641 is Tailscale's default WireGuard port; allowing it inbound
# enables direct (non-relayed) connections.
resource "aws_security_group_rule" "tailscale_ingress" {
  security_group_id = aws_security_group.tailscale.id
  type              = "ingress"
  from_port         = 41641
  to_port           = 41641
  protocol          = "udp"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
}

resource "aws_security_group_rule" "tailscale_egress" {
  security_group_id = aws_security_group.tailscale.id
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
}

resource "aws_security_group_rule" "internal_vpc_ingress_ipv4" {
  security_group_id = aws_security_group.tailscale.id
  type              = "ingress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = [local.vpc_cidr_block]
}

# IAM Role for EKS Cluster
resource "aws_iam_role" "cluster" {
  name = "${local.name}-cluster-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "eks.amazonaws.com"
        }
      }
    ]
  })

  tags = local.aws_tags
}

resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.cluster.name
}

# IAM Role for EKS Node Group
resource "aws_iam_role" "node_group" {
  name = "${local.name}-node-group-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      }
    ]
  })

  tags = local.aws_tags
}

resource "aws_iam_role_policy_attachment" "node_group_AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.node_group.name
}

resource "aws_iam_role_policy_attachment" "node_group_AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.node_group.name
}

resource "aws_iam_role_policy_attachment" "node_group_AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.node_group.name
}
