# main.tf — CAST AI Omni installation: Liqo networking fabric + Omni agent
# (183 lines, 156 loc, 5.1 KB)
# Input validations for provider-specific requirements. `check` assertions
# surface as warnings after plan/apply, so adding one is backward compatible.
check "reserved_cidrs_required_for_gke" {
  # GKE clusters must supply reserved subnet CIDRs (consumed by
  # module.liqo_helm_values_gke below); EKS has no such requirement.
  assert {
    condition     = var.k8s_provider != "gke" || (var.reserved_subnet_cidrs != null && length(var.reserved_subnet_cidrs) > 0)
    error_message = "'reserved_subnet_cidrs' must be provided for GKE cluster"
  }

  # Only "gke" and "eks" are wired up: locals.provider_specific_liqo_values
  # indexes one of the two provider modules, so any other value would fail
  # later with a cryptic "index out of range" error. Surface it clearly here.
  assert {
    condition     = contains(["gke", "eks"], var.k8s_provider)
    error_message = "'k8s_provider' must be either \"gke\" or \"eks\""
  }
}
locals {
  # Liqo (virtual-cluster networking fabric) Helm chart coordinates.
  liqo_chart_repo   = "https://castai.github.io/liqo"
  liqo_chart_name   = "liqo"
  liqo_release_name = "omni"
  # Image tag tracks the chart version one-to-one.
  liqo_image_tag = var.liqo_chart_version

  # Namespace shared by the Liqo release and the Omni agent release.
  omni_namespace     = "castai-omni"
  omni_agent_release = "omni-agent"
  omni_agent_chart   = "omni-agent"

  castai_helm_repository = "https://castai.github.io/helm-charts"

  # Common Liqo configurations as YAML, applied regardless of provider.
  # Ports are quoted strings because the chart expects string values here.
  common_liqo_yaml_values = <<-EOT
    networking:
      fabric:
        config:
          healthProbeBindAddressPort: '7071'
          metricsAddressPort: '7072'
  EOT

  # Select the appropriate set_values based on k8s_provider.
  # NOTE(review): anything other than "gke" falls through to the EKS branch;
  # if k8s_provider is neither "gke" nor "eks", the eks module has count = 0
  # and this index fails with an "index out of range" error — confirm inputs
  # are validated upstream.
  provider_specific_liqo_values = var.k8s_provider == "gke" ? module.liqo_helm_values_gke[0].set_values : module.liqo_helm_values_eks[0].set_values
}
# GKE-specific Liqo Helm chart configuration.
# Instantiated only for GKE clusters; exposes `set_values` consumed by
# locals.provider_specific_liqo_values.
module "liqo_helm_values_gke" {
  count  = var.k8s_provider == "gke" ? 1 : 0
  source = "./modules/gke"

  image_tag          = local.liqo_image_tag
  cluster_name       = var.cluster_name
  cluster_region     = var.cluster_region
  cluster_zone       = var.cluster_zone
  api_server_address = var.api_server_address
  pod_cidr           = var.pod_cidr
  service_cidr       = var.service_cidr
  # Required for GKE — enforced by check "reserved_cidrs_required_for_gke".
  reserved_subnet_cidrs = var.reserved_subnet_cidrs
}
# EKS-specific Liqo Helm chart configuration.
# Instantiated only for EKS clusters; exposes `set_values` consumed by
# locals.provider_specific_liqo_values. Unlike the GKE module it takes no
# cluster_zone or reserved_subnet_cidrs.
module "liqo_helm_values_eks" {
  count  = var.k8s_provider == "eks" ? 1 : 0
  source = "./modules/eks"

  image_tag          = local.liqo_image_tag
  cluster_name       = var.cluster_name
  cluster_region     = var.cluster_region
  api_server_address = var.api_server_address
  pod_cidr           = var.pod_cidr
  service_cidr       = var.service_cidr
}
# Liqo Helm Release — installs the networking fabric that CAST AI Omni
# builds on, into the shared castai-omni namespace.
resource "helm_release" "liqo" {
  name             = local.liqo_release_name
  repository       = local.liqo_chart_repo
  chart            = local.liqo_chart_name
  version          = var.liqo_chart_version
  namespace        = local.omni_namespace
  create_namespace = true
  cleanup_on_fail  = true # remove resources of a failed install/upgrade instead of leaving partial state
  wait             = true # block until chart resources report ready; downstream waits depend on this

  # Common YAML values plus provider-specific (GKE or EKS) set overrides.
  values = [local.common_liqo_yaml_values]
  set    = local.provider_specific_liqo_values
}
# Wait for Liqo network resources to be ready before proceeding.
# Polls for the external-CIDR Network resource that the Liqo release creates;
# data.external.liqo_external_cidr reads its value afterwards.
# NOTE(review): relies on `kubectl` in PATH with its current context pointing
# at the target cluster — confirm the execution environment guarantees this.
resource "null_resource" "wait_for_liqo_network" {
  # Re-run the wait whenever the Liqo release is replaced or upgraded.
  # Without triggers, a null_resource provisioner runs only once at create
  # time and would silently skip the wait on subsequent Liqo changes.
  triggers = {
    liqo_release_id = helm_release.liqo.id
    liqo_version    = helm_release.liqo.version
  }

  provisioner "local-exec" {
    command = <<-EOT
      set -e
      echo "Waiting for Liqo networks.ipam.liqo.io CRD to be established..."
      kubectl wait --for condition=established --timeout=300s crd/networks.ipam.liqo.io
      echo "Waiting for external CIDR network resource to be created..."
      timeout=300
      elapsed=0
      interval=5
      while [ $elapsed -lt $timeout ]; do
        # status.cidr is populated asynchronously by the Liqo IPAM controller,
        # so poll until it is non-empty (errors suppressed while absent).
        CIDR=$(kubectl get networks.ipam.liqo.io -n ${local.omni_namespace} \
          -l ipam.liqo.io/network-type=external-cidr \
          -o jsonpath='{.items[0].status.cidr}' 2>/dev/null || echo "")
        if [ -n "$CIDR" ]; then
          echo "External CIDR network resource is ready: $CIDR"
          exit 0
        fi
        echo "Waiting for external CIDR to be populated... ($elapsed/$timeout seconds)"
        sleep $interval
        elapsed=$((elapsed + interval))
      done
      echo "Timeout waiting for external CIDR network resource"
      exit 1
    EOT
  }

  depends_on = [helm_release.liqo]
}
# Extract the external CIDR value from the Liqo network resource.
# Emits {"cidr": "<value>"} for consumption by helm_release.omni_agent;
# falls back to {"cidr": ""} if the lookup fails so the data source itself
# never errors out.
# NOTE(review): assumes `kubectl`'s current context targets the cluster this
# module manages — confirm for CI/remote runs. An empty cidr here would be
# passed through to the omni-agent chart silently.
data "external" "liqo_external_cidr" {
  program = ["bash", "-c", <<-EOT
    CIDR=$(kubectl get networks.ipam.liqo.io -n ${local.omni_namespace} \
      -l ipam.liqo.io/network-type=external-cidr \
      -o jsonpath='{.items[0].status.cidr}' 2>/dev/null)
    if [ -z "$CIDR" ]; then
      echo '{"cidr":""}'
    else
      echo "{\"cidr\":\"$CIDR\"}"
    fi
  EOT
  ]

  # Only read after the wait confirms the Network resource is populated.
  depends_on = [null_resource.wait_for_liqo_network]
}
# Enabling CAST AI Omni functionality for a given cluster.
resource "castai_omni_cluster" "this" {
  cluster_id      = var.cluster_id
  organization_id = var.organization_id

  # Enable only after the Liqo network (external CIDR) is in place.
  depends_on = [null_resource.wait_for_liqo_network]
}
# CAST AI Omni Agent Helm Release — deploys the agent alongside Liqo in the
# same namespace, wiring in the discovered external CIDR and CAST AI
# connection details.
# NOTE(review): no chart `version` is pinned, so each apply may pull the
# latest published omni-agent chart — confirm this is intentional.
resource "helm_release" "omni_agent" {
  name             = local.omni_agent_release
  repository       = local.castai_helm_repository
  chart            = local.omni_agent_chart
  namespace        = local.omni_namespace
  create_namespace = true
  cleanup_on_fail  = true
  wait             = true

  set = [
    {
      # External CIDR discovered from the Liqo Network resource.
      name  = "network.externalCIDR"
      value = data.external.liqo_external_cidr.result.cidr
    },
    {
      name  = "network.podCIDR"
      value = var.pod_cidr
    },
    {
      name  = "castai.apiUrl"
      value = var.api_url
    },
    {
      name  = "castai.organizationID"
      value = var.organization_id
    },
    {
      name  = "castai.clusterID"
      value = var.cluster_id
    },
    {
      name  = "castai.clusterName"
      value = var.cluster_name
    }
  ]

  # API token kept out of plan output / state diffs.
  set_sensitive = [
    {
      name  = "castai.apiKey"
      value = var.api_token
    }
  ]

  # Omni must be enabled on the cluster before the agent starts.
  depends_on = [castai_omni_cluster.this]
}