-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathproviders.tf
More file actions
121 lines (107 loc) · 4.77 KB
/
providers.tf
File metadata and controls
121 lines (107 loc) · 4.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
locals {
  # Kubernetes cluster mode; "disabled" when var.k8s_cluster has no mode set.
  k8s_mode = try(var.k8s_cluster.mode, "disabled")

  # Every dependency "mode" value that can select a cloud backend. A cloud
  # provider is considered in use when the platform itself, or any of these
  # modes, names that cloud. Keeping the list in one place avoids the
  # previous per-cloud copy-paste of the same nine conditions.
  cloud_dependency_modes = [
    local.k8s_mode,
    try(var.postgres.mode, ""),
    try(var.redis.mode, ""),
    try(var.object_storage.mode, ""),
    try(var.oauth.mode, ""),
    try(var.secrets.mode, ""),
    try(var.metrics_logs.mode, ""),
    try(var.dns.mode, "byo"),
  ]

  # Which providers actually need credentials for this configuration.
  # Keys: aws / azure / gcp / cloudflare. Cloudflare is only selected via
  # dns.mode = "cf" and never via var.platform.
  provider_usage = merge(
    {
      for cloud in ["aws", "azure", "gcp"] :
      cloud => var.platform == cloud || contains(local.cloud_dependency_modes, cloud)
    },
    {
      cloudflare = try(var.dns.mode, "byo") == "cf"
    }
  )
}
# AWS Provider - configured via environment variables or AWS credential chain
# Set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION in environment
# or use AWS CLI profiles, IAM roles, etc.
provider "aws" {
  # When no AWS-backed dependency is active (local.provider_usage.aws is
  # false), skip all three validation paths so `terraform plan` does not
  # require AWS credentials or a reachable metadata endpoint.
  skip_credentials_validation = !local.provider_usage.aws
  skip_metadata_api_check     = !local.provider_usage.aws
  skip_region_validation      = !local.provider_usage.aws
}
# Azure Provider - configured via environment variables or Azure CLI
provider "azurerm" {
  # Required empty features block for azurerm.
  features {}
  # Only register resource providers when an Azure-backed dependency is
  # actually in use; otherwise skip to avoid needing subscription access.
  # NOTE(review): skip_provider_registration was removed in azurerm 4.x in
  # favor of resource_provider_registrations — confirm the pinned provider
  # version is < 4.0, or migrate this setting.
  skip_provider_registration = !local.provider_usage.azure
  # Suppress the Terraform partner ID header when Azure is unused.
  disable_terraform_partner_id = !local.provider_usage.azure
}
# GCP Provider - configured via environment variables or gcloud CLI
provider "google" {
  # Project and region can be set via GOOGLE_PROJECT and GOOGLE_REGION env vars
  # or via gcloud CLI configuration; no in-file configuration is needed, so
  # the block is intentionally empty.
}
# Cloudflare provider is configured automatically when dns.mode = "cf" via environment variables.
# Write kubeconfig for managed clusters (AWS/Azure/GCP)
resource "local_file" "kubeconfig" {
  # Only materialize the file when the k8s module asks for one.
  count    = module.k8s_cluster.write_kubeconfig ? 1 : 0
  content  = module.k8s_cluster.kubeconfig
  filename = module.k8s_cluster.kubeconfig_path

  # The kubeconfig embeds cluster credentials; local_file defaults to
  # file_permission = "0777", so restrict it to owner read/write only.
  file_permission = "0600"

  # Redundant with the implicit dependency created by the module references
  # above, but kept explicit for clarity.
  depends_on = [module.k8s_cluster]
}
# Kubernetes Provider - uses dedicated kubeconfig file (never ~/.kube/config)
# Uses host/exec auth directly instead of kubeconfig file to avoid chicken-egg problem
provider "kubernetes" {
  # For GCP, use the kubeconfig file instead of exec (gke-gcloud-auth-plugin needs context)
  # Only use config_path if the file actually exists (avoids chicken-egg problem on first apply)
  # NOTE(review): fileexists() is evaluated at plan time, so on the very first
  # apply this resolves to null even though local_file.kubeconfig will create
  # the file during that same apply — a second apply may be needed; confirm.
  config_path = module.k8s_cluster.write_kubeconfig && fileexists(module.k8s_cluster.kubeconfig_path) ? module.k8s_cluster.kubeconfig_path : null
  # For AWS/Azure, use direct connection with exec; these are nulled out when
  # the kubeconfig-file path is in use so the two auth styles never mix.
  host                   = module.k8s_cluster.write_kubeconfig ? null : try(module.k8s_cluster.cluster_endpoint, null)
  cluster_ca_certificate = module.k8s_cluster.write_kubeconfig ? null : try(base64decode(module.k8s_cluster.cluster_ca_certificate), null)
  # Exec-based auth (e.g. cloud CLI token helper) supplied by the k8s module;
  # emitted zero times when a kubeconfig file is written instead.
  dynamic "exec" {
    for_each = module.k8s_cluster.write_kubeconfig ? [] : module.k8s_cluster.provider_exec
    content {
      api_version = exec.value.api_version
      command     = exec.value.command
      # args/env are optional in the module's exec descriptor.
      args = try(exec.value.args, [])
      env  = try(exec.value.env, {})
    }
  }
}
# Helm Provider - same approach as the kubernetes provider: kubeconfig file
# when the module writes one, direct host/exec auth otherwise. The two
# configurations must be kept in sync by hand.
provider "helm" {
  kubernetes {
    # For GCP, use the kubeconfig file instead of exec (gke-gcloud-auth-plugin needs context)
    # Only use config_path if the file actually exists (avoids chicken-egg problem on first apply)
    # NOTE(review): fileexists() is evaluated at plan time, so the first apply
    # may fall through to null before the kubeconfig is written — confirm.
    config_path = module.k8s_cluster.write_kubeconfig && fileexists(module.k8s_cluster.kubeconfig_path) ? module.k8s_cluster.kubeconfig_path : null
    # For AWS/Azure, use direct connection with exec; nulled when the
    # kubeconfig-file path is active.
    host                   = module.k8s_cluster.write_kubeconfig ? null : try(module.k8s_cluster.cluster_endpoint, null)
    cluster_ca_certificate = module.k8s_cluster.write_kubeconfig ? null : try(base64decode(module.k8s_cluster.cluster_ca_certificate), null)
    # Exec-based auth descriptor from the k8s module; emitted zero times when
    # a kubeconfig file is written instead.
    dynamic "exec" {
      for_each = module.k8s_cluster.write_kubeconfig ? [] : module.k8s_cluster.provider_exec
      content {
        api_version = exec.value.api_version
        command     = exec.value.command
        # args/env are optional in the module's exec descriptor.
        args = try(exec.value.args, [])
        env  = try(exec.value.env, {})
      }
    }
  }
}