diff --git a/docs/resources/cluster_custom_cloud.md b/docs/resources/cluster_custom_cloud.md index d9bbcf10a..f53890a7b 100644 --- a/docs/resources/cluster_custom_cloud.md +++ b/docs/resources/cluster_custom_cloud.md @@ -147,7 +147,7 @@ Refer to the [Import section](/docs#import) to learn more. - `cloud` (String) The cloud provider name. - `cloud_account_id` (String) The cloud account id to use for this cluster. - `cloud_config` (Block List, Min: 1, Max: 1) The Cloud environment configuration settings such as network parameters and encryption parameters that apply to this cluster. (see [below for nested schema](#nestedblock--cloud_config)) -- `machine_pool` (Block List, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool)) +- `machine_pool` (Block Set, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool)) - `name` (String) The name of the cluster. ### Optional diff --git a/docs/resources/cluster_eks.md b/docs/resources/cluster_eks.md index 939d6ccf2..7197eb298 100644 --- a/docs/resources/cluster_eks.md +++ b/docs/resources/cluster_eks.md @@ -116,7 +116,7 @@ Refer to the [Import section](/docs#import) to learn more. - `cloud_account_id` (String) The AWS cloud account id to use for this cluster. - `cloud_config` (Block List, Min: 1, Max: 1) The AWS environment configuration settings such as network parameters and encryption parameters that apply to this cluster. (see [below for nested schema](#nestedblock--cloud_config)) -- `machine_pool` (Block List, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool)) +- `machine_pool` (Block Set, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool)) - `name` (String) The name of the cluster. 
### Optional diff --git a/spectrocloud/cluster_common_hash.go b/spectrocloud/cluster_common_hash.go index c5ac47dd0..ff2b16d3c 100644 --- a/spectrocloud/cluster_common_hash.go +++ b/spectrocloud/cluster_common_hash.go @@ -22,10 +22,28 @@ func CommonHash(nodePool map[string]interface{}) *bytes.Buffer { buf.WriteString(HashStringMapList(nodePool["taints"])) } if val, ok := nodePool["control_plane"]; ok { - buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + var boolVal bool + switch v := val.(type) { + case bool: + boolVal = v + case *bool: + if v != nil { + boolVal = *v + } + } + buf.WriteString(fmt.Sprintf("%t-", boolVal)) } if val, ok := nodePool["control_plane_as_worker"]; ok { - buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + var boolVal bool + switch v := val.(type) { + case bool: + boolVal = v + case *bool: + if v != nil { + boolVal = *v + } + } + buf.WriteString(fmt.Sprintf("%t-", boolVal)) } if val, ok := nodePool["name"]; ok { buf.WriteString(fmt.Sprintf("%s-", val.(string))) @@ -66,7 +84,16 @@ func resourceMachinePoolAzureHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", val.(string))) } if val, ok := m["is_system_node_pool"]; ok { - buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + var boolVal bool + switch v := val.(type) { + case bool: + boolVal = v + case *bool: + if v != nil { + boolVal = *v + } + } + buf.WriteString(fmt.Sprintf("%t-", boolVal)) } if val, ok := m["os_type"]; ok && val != "" { buf.WriteString(fmt.Sprintf("%s-", val.(string))) @@ -86,7 +113,16 @@ func resourceMachinePoolAksHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", val.(int))) } if val, ok := m["is_system_node_pool"]; ok { - buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + var boolVal bool + switch v := val.(type) { + case bool: + boolVal = v + case *bool: + if v != nil { + boolVal = *v + } + } + buf.WriteString(fmt.Sprintf("%t-", boolVal)) } if val, ok := m["storage_account_type"]; ok { buf.WriteString(fmt.Sprintf("%s-", val.(string))) @@ -150,31 +186,73 @@ func resourceMachinePoolAwsHash(v interface{}) int { } func resourceMachinePoolEksHash(v interface{}) int { - m := v.(map[string]interface{}) - buf := CommonHash(m) + nodePool := v.(map[string]interface{}) + var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%d-", m["disk_size_gb"].(int))) - if m["min"] != nil { - buf.WriteString(fmt.Sprintf("%d-", m["min"].(int))) + if val, ok := nodePool["count"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) } - if m["max"] != nil { - buf.WriteString(fmt.Sprintf("%d-", m["max"].(int))) + if val, ok := nodePool["disk_size_gb"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) + } + if val, ok := nodePool["instance_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + if val, ok := nodePool["name"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + + if _, ok := nodePool["additional_labels"]; ok { + buf.WriteString(HashStringMap(nodePool["additional_labels"])) + } + if val, ok := nodePool["ami_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) } - buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["capacity_type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["max_price"].(string))) - keys := make([]string, 0, len(m["az_subnets"].(map[string]interface{}))) - for k := range m["az_subnets"].(map[string]interface{}) { + keys := make([]string, 0, len(nodePool["az_subnets"].(map[string]interface{}))) + for k := range nodePool["az_subnets"].(map[string]interface{}) { keys = append(keys, 
k) } sort.Strings(keys) for _, k := range keys { - buf.WriteString(fmt.Sprintf("%s-%s", k, m["az_subnets"].(map[string]interface{})[k].(string))) + buf.WriteString(fmt.Sprintf("%s-%s", k, nodePool["az_subnets"].(map[string]interface{})[k].(string))) } - if m["eks_launch_template"] != nil { - buf.WriteString(eksLaunchTemplate(m["eks_launch_template"])) + if nodePool["azs"] != nil { + azsList := nodePool["azs"].([]interface{}) + azsListStr := make([]string, len(azsList)) + for i, v := range azsList { + azsListStr[i] = v.(string) + } + sort.Strings(azsListStr) + azsStr := strings.Join(azsListStr, "-") + buf.WriteString(fmt.Sprintf("%s-", azsStr)) + } + + if val, ok := nodePool["capacity_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + + if nodePool["min"] != nil { + buf.WriteString(fmt.Sprintf("%d-", nodePool["min"].(int))) + } + if nodePool["max"] != nil { + buf.WriteString(fmt.Sprintf("%d-", nodePool["max"].(int))) + } + if nodePool["max_price"] != nil { + buf.WriteString(fmt.Sprintf("%s-", nodePool["max_price"].(string))) + } + if nodePool["node"] != nil { + buf.WriteString(HashStringMapList(nodePool["node"])) + } + if _, ok := nodePool["taints"]; ok { + buf.WriteString(HashStringMapList(nodePool["taints"])) + } + if nodePool["eks_launch_template"] != nil { + buf.WriteString(eksLaunchTemplate(nodePool["eks_launch_template"])) + } + if val, ok := nodePool["update_strategy"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) } return int(hash(buf.String())) @@ -246,23 +324,33 @@ func resourceMachinePoolVsphereHash(v interface{}) int { func resourceMachinePoolCustomCloudHash(v interface{}) int { m := v.(map[string]interface{}) var buf bytes.Buffer - if _, ok := m["name"]; ok { - buf.WriteString(HashStringMap(m["name"])) - } - if _, ok := m["count"]; ok { - buf.WriteString(HashStringMap(m["count"])) - } - if _, ok := m["additional_labels"]; ok { - buf.WriteString(HashStringMap(m["additional_labels"])) - } + if _, ok := m["taints"]; ok { buf.WriteString(HashStringMapList(m["taints"])) } if val, ok := m["control_plane"]; ok { - buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + var boolVal bool + switch v := val.(type) { + case bool: + boolVal = v + case *bool: + if v != nil { + boolVal = *v + } + } + buf.WriteString(fmt.Sprintf("%t-", boolVal)) } if val, ok := m["control_plane_as_worker"]; ok { - buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + var boolVal bool + switch v := val.(type) { + case bool: + boolVal = v + case *bool: + if v != nil { + boolVal = *v + } + } + buf.WriteString(fmt.Sprintf("%t-", boolVal)) } buf.WriteString(fmt.Sprintf("%s-", m["node_pool_config"].(string))) diff --git a/spectrocloud/cluster_common_hash_test.go b/spectrocloud/cluster_common_hash_test.go index a05935efb..60f9df503 100644 --- a/spectrocloud/cluster_common_hash_test.go +++ b/spectrocloud/cluster_common_hash_test.go @@ -216,7 +216,7 @@ func TestResourceMachinePoolEksHash(t *testing.T) { }, }, }, - expected: 456946481, + expected: 706444520, }, } diff --git a/spectrocloud/resource_cluster_custom_cloud.go b/spectrocloud/resource_cluster_custom_cloud.go index 35d0f64a9..3d7af0fbb 100644 --- a/spectrocloud/resource_cluster_custom_cloud.go +++ b/spectrocloud/resource_cluster_custom_cloud.go @@ -37,7 +37,14 @@ func resourceClusterCustomCloud() *schema.Resource { Delete: schema.DefaultTimeout(60 * time.Minute), }, - SchemaVersion: 2, + SchemaVersion: 3, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceClusterCustomCloudResourceV2().CoreConfigSchema().ImpliedType(), + 
Upgrade: resourceClusterCustomCloudStateUpgradeV2, + Version: 2, + }, + }, Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -133,8 +140,9 @@ func resourceClusterCustomCloud() *schema.Resource { }, "machine_pool": { - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, + Set: resourceMachinePoolCustomCloudHash, Description: "The machine pool configuration for the cluster.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -365,70 +373,74 @@ func resourceClusterCustomCloudUpdate(ctx context.Context, d *schema.ResourceDat log.Printf("[DEBUG] === MACHINE POOL CHANGE DETECTED ===") oraw, nraw := d.GetChange("machine_pool") if oraw == nil { - oraw = make([]interface{}, 0) + oraw = new(schema.Set) } if nraw == nil { - nraw = make([]interface{}, 0) + nraw = new(schema.Set) } - os := oraw.([]interface{}) - ns := nraw.([]interface{}) + os := oraw.(*schema.Set) + ns := nraw.(*schema.Set) - log.Printf("[DEBUG] Old machine pools count: %d, New machine pools count: %d", len(os), len(ns)) + log.Printf("[DEBUG] Old machine pools count: %d, New machine pools count: %d", os.Len(), ns.Len()) + // Create maps by machine pool name for proper comparison osMap := make(map[string]interface{}) - for _, mp := range os { - machinePool := mp.(map[string]interface{}) - osMap[machinePool["name"].(string)] = machinePool + for _, mp := range os.List() { + machinePoolResource := mp.(map[string]interface{}) + name := extractMachinePoolNameFromYAML(machinePoolResource) + if name != "" { + osMap[name] = machinePoolResource + } } nsMap := make(map[string]interface{}) - for i, mp := range ns { + for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - log.Printf("[DEBUG] Processing machine pool %d: %+v", i, machinePoolResource) - - // Extract name from YAML first name := extractMachinePoolNameFromYAML(machinePoolResource) - log.Printf("[DEBUG] Extracted machine pool name: '%s'", name) - - nsMap[name] = machinePoolResource if name != "" { - newHash := resourceMachinePoolCustomCloudHash(machinePoolResource) - var err error - machinePool := toMachinePoolCustomCloud(mp) - if oldMachinePool, ok := osMap[name]; !ok { + nsMap[name] = machinePoolResource + + // Check if this is a new, updated, or unchanged machine pool + if oldMachinePool, exists := osMap[name]; !exists { + // NEW machine pool - CREATE log.Printf("[DEBUG] Creating new machine pool %s", name) - if err = c.CreateMachinePoolCustomCloud(machinePool, cloudConfigId, cloudType); err != nil { + machinePool := toMachinePoolCustomCloud(mp) + if err := c.CreateMachinePoolCustomCloud(machinePool, cloudConfigId, cloudType); err != nil { return diag.FromErr(err) } } else { + // EXISTING machine pool - check if hash changed oldHash := resourceMachinePoolCustomCloudHash(oldMachinePool) - log.Printf("[DEBUG] Machine pool %s - Old hash: %d, New hash: %d", name, oldHash, newHash) - if newHash != oldHash { - log.Printf("[DEBUG] Change detected in machine pool %s - updating", name) - if err = c.UpdateMachinePoolCustomCloud(machinePool, name, cloudConfigId, cloudType); err != nil { + newHash := resourceMachinePoolCustomCloudHash(machinePoolResource) + + if oldHash != newHash { + // MODIFIED machine pool - UPDATE + log.Printf("[DEBUG] Updating machine pool %s (hash changed: %d -> %d)", name, oldHash, newHash) + machinePool := toMachinePoolCustomCloud(mp) + if err := c.UpdateMachinePoolCustomCloud(machinePool, name, cloudConfigId, cloudType); err != nil { return diag.FromErr(err) } } else { - log.Printf("[DEBUG] No 
changes detected in machine pool %s - skipping update", name) + // UNCHANGED machine pool - no action needed + log.Printf("[DEBUG] Machine pool %s unchanged (hash: %d)", name, oldHash) } } - // Processed (if exists) + + // Mark as processed delete(osMap, name) } else { - log.Printf("[DEBUG] WARNING: Machine pool %d has empty name!", i) + log.Printf("[DEBUG] WARNING: Machine pool has empty name!") } } - // Deleted old machine pools - for _, mp := range osMap { - machinePool := mp.(map[string]interface{}) - name := machinePool["name"].(string) - log.Printf("Deleted machine pool %s", name) - if err = c.DeleteMachinePoolCustomCloud(name, cloudConfigId, cloudType); err != nil { + + // REMOVED machine pools - DELETE + for name := range osMap { + log.Printf("[DEBUG] Deleting removed machine pool %s", name) + if err := c.DeleteMachinePoolCustomCloud(name, cloudConfigId, cloudType); err != nil { return diag.FromErr(err) } } - } diagnostics, done := updateCommonFields(d, c) @@ -457,7 +469,8 @@ func toCustomCloudCluster(c *client.V1Client, d *schema.ResourceData) (*models.V customClusterConfig := toCustomClusterConfig(d) machinePoolConfigs := make([]*models.V1CustomMachinePoolConfigEntity, 0) - for _, machinePool := range d.Get("machine_pool").([]interface{}) { + machinePoolSet := d.Get("machine_pool").(*schema.Set) + for _, machinePool := range machinePoolSet.List() { mp := toMachinePoolCustomCloud(machinePool) machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -1471,9 +1484,19 @@ func flattenMachinePoolConfigsCustomCloudWithOverrides(machinePools []*models.V1 } // Get current machine pool configuration from state - currentMachinePools := d.Get("machine_pool").([]interface{}) + currentMachinePoolsRaw := d.Get("machine_pool") currentMPMap := make(map[string]map[string]interface{}) + // Handle both TypeSet (new) and TypeList (during migration) formats + var currentMachinePools []interface{} + if machinePoolSet, ok := currentMachinePoolsRaw.(*schema.Set); ok { + // TypeSet format + currentMachinePools = machinePoolSet.List() + } else if machinePoolList, ok := currentMachinePoolsRaw.([]interface{}); ok { + // TypeList format (legacy/migration) + currentMachinePools = machinePoolList + } + for _, mp := range currentMachinePools { if mpMap, ok := mp.(map[string]interface{}); ok { if name, exists := mpMap["name"]; exists { @@ -1603,7 +1626,7 @@ func flattenCloudConfigCustom(configUID string, d *schema.ResourceData, c *clien return diag.FromErr(err), true } log.Printf("[ERROR] About to call flattenMachinePoolConfigsCustomCloudWithOverrides") - if err := d.Set("machine_pool", flattenMachinePoolConfigsCustomCloudWithOverrides(config.Spec.MachinePoolConfig, d)); err != nil { + if err := d.Set("machine_pool", schema.NewSet(resourceMachinePoolCustomCloudHash, flattenMachinePoolConfigsCustomCloudWithOverrides(config.Spec.MachinePoolConfig, d))); err != nil { log.Printf("[ERROR] Failed to set machine_pool: %v", err) return diag.FromErr(err), true } @@ -1732,3 +1755,218 @@ func min(a, b int) int { } return b } + +// resourceClusterCustomCloudResourceV2 returns the schema for version 2 of the resource +func resourceClusterCustomCloudResourceV2() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the cluster.", + }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), 
+ Description: "The context of the EKS cluster. Allowed values are `project` or `tenant`. " + + "Default is `project`. " + PROJECT_NAME_NUANCE, + }, + "cloud": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: "The cloud provider name.", + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: "The description of the cluster. Default value is empty string.", + }, + "cluster_profile": schemas.ClusterProfileSchema(), + "apply_setting": { + Type: schema.TypeString, + Optional: true, + Default: "DownloadAndInstall", + ValidateFunc: validation.StringInSlice([]string{"DownloadAndInstall", "DownloadAndInstallLater"}, false), + Description: "The setting to apply the cluster profile. `DownloadAndInstall` will download and install packs in one action. " + + "`DownloadAndInstallLater` will only download artifact and postpone install for later. " + + "Default value is `DownloadAndInstall`.", + }, + "cloud_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The cloud account id to use for this cluster.", + }, + "cloud_config_id": { + Type: schema.TypeString, + Computed: true, + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + }, + "cloud_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "The Cloud environment configuration settings such as network parameters and encryption parameters that apply to this cluster.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeString, + Required: true, + Description: "The values of the cloud config. The values are specified in YAML format. ", + }, + "overrides": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Key-value pairs to override specific values in the YAML.", + }, + }, + }, + }, + // Version 2 used TypeList for machine_pool + "machine_pool": { + Type: schema.TypeList, + Required: true, + Description: "The machine pool configuration for the cluster.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the machine pool. This will be derived from the name value in the `node_pool_config`.", + }, + "count": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of nodes in the machine pool. This will be derived from the replica value in the 'node_pool_config'.", + }, + "taints": schemas.ClusterTaintsSchema(), + "control_plane": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether this machine pool is a control plane. Defaults to `false`.", + }, + "control_plane_as_worker": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", + }, + "node_pool_config": { + Type: schema.TypeString, + Required: true, + Description: "The values of the node pool config. The values are specified in YAML format. 
", + }, + "overrides": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "Key-value pairs to override specific values in the node pool config YAML.", + }, + }, + }, + }, + "pause_agent_upgrades": { + Type: schema.TypeString, + Optional: true, + Default: "unlock", + ValidateFunc: validation.StringInSlice([]string{"lock", "unlock"}, false), + Description: "The pause agent upgrades setting allows to control the automatic upgrade of the Palette component and agent for an individual cluster. The default value is `unlock`, meaning upgrades occur automatically. Setting it to `lock` pauses automatic agent upgrades for the cluster.", + }, + "os_patch_on_boot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", + }, + "os_patch_schedule": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateOsPatchSchedule, + Description: "The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`.", + }, + "os_patch_after": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", + }, + "admin_kube_config": { + Type: schema.TypeString, + Computed: true, + Description: "Admin Kube-config for the cluster. This can be used to connect to the cluster using `kubectl`, With admin privilege.", + }, + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "location_config": schemas.ClusterLocationSchema(), + "skip_completion": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. Default value is `false`.", + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If set to `true`, the cluster will be force deleted and user has to manually clean up the provisioned cloud resources.", + }, + "force_delete_delay": { + Type: schema.TypeInt, + Optional: true, + Default: 20, + Description: "Delay duration in minutes to before invoking cluster force delete. Default and minimum is 20.", + ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(20)), + }, + }, + } +} + +// resourceClusterCustomCloudStateUpgradeV2 migrates state from version 2 to version 3 +func resourceClusterCustomCloudStateUpgradeV2(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Upgrading cluster custom cloud state from version 2 to 3") + + // Convert machine_pool from TypeList to TypeSet + // Note: We keep the data as a list in rawState and let Terraform's schema processing + // convert it to TypeSet during normal resource loading. This avoids JSON serialization + // issues with schema.Set objects that contain hash functions. 
+ if machinePoolRaw, exists := rawState["machine_pool"]; exists { + if machinePoolList, ok := machinePoolRaw.([]interface{}); ok { + log.Printf("[DEBUG] Keeping machine_pool as list during state upgrade with %d items", len(machinePoolList)) + + // Keep the machine pool data as-is (as a list) + // Terraform will convert it to TypeSet when loading the resource using the schema + rawState["machine_pool"] = machinePoolList + + log.Printf("[DEBUG] Successfully prepared machine_pool for TypeSet conversion") + } else { + log.Printf("[DEBUG] machine_pool is not a list, skipping conversion") + } + } else { + log.Printf("[DEBUG] No machine_pool found in state, skipping conversion") + } + + return rawState, nil +} diff --git a/spectrocloud/resource_cluster_eks.go b/spectrocloud/resource_cluster_eks.go index 99a68938b..fa3986b8f 100644 --- a/spectrocloud/resource_cluster_eks.go +++ b/spectrocloud/resource_cluster_eks.go @@ -34,7 +34,14 @@ func resourceClusterEks() *schema.Resource { Delete: schema.DefaultTimeout(60 * time.Minute), }, - SchemaVersion: 2, + SchemaVersion: 3, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceClusterEksResourceV2().CoreConfigSchema().ImpliedType(), + Upgrade: resourceClusterEksStateUpgradeV2, + Version: 2, + }, + }, Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -224,8 +231,9 @@ func resourceClusterEks() *schema.Resource { }, }, "machine_pool": { - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, + Set: resourceMachinePoolEksHash, Description: "The machine pool configuration for the cluster.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -288,6 +296,7 @@ func resourceClusterEks() *schema.Resource { "max_price": { Type: schema.TypeString, Optional: true, + Default: "", }, "azs": { Type: schema.TypeList, @@ -571,7 +580,11 @@ func flattenMachinePoolConfigsEks(machinePools []*models.V1EksMachinePoolConfig) oi["capacity_type"] = machinePool.CapacityType } if machinePool.SpotMarketOptions != nil { - oi["max_price"] = machinePool.SpotMarketOptions.MaxPrice + if machinePool.SpotMarketOptions.MaxPrice != "" { + oi["max_price"] = machinePool.SpotMarketOptions.MaxPrice + } else { + oi["max_price"] = "" + } } oi["disk_size_gb"] = int(machinePool.RootDeviceSize) if len(machinePool.SubnetIds) > 0 { @@ -700,6 +713,7 @@ func resourceClusterEksUpdate(ctx context.Context, d *schema.ResourceData, m int _ = d.Get("machine_pool") if d.HasChange("machine_pool") { + log.Printf("[DEBUG] === MACHINE POOL CHANGE DETECTED ===") oraw, nraw := d.GetChange("machine_pool") if oraw == nil { oraw = new(schema.Set) @@ -708,55 +722,69 @@ func resourceClusterEksUpdate(ctx context.Context, d *schema.ResourceData, m int nraw = new(schema.Set) } - os := oraw.([]interface{}) - ns := nraw.([]interface{}) + os := oraw.(*schema.Set) + ns := nraw.(*schema.Set) + + log.Printf("[DEBUG] Old machine pools count: %d, New machine pools count: %d", os.Len(), ns.Len()) + // Create maps by machine pool name for proper comparison osMap := make(map[string]interface{}) - for _, mp := range os { - machinePool := mp.(map[string]interface{}) - osMap[machinePool["name"].(string)] = machinePool + for _, mp := range os.List() { + machinePoolResource := mp.(map[string]interface{}) + name := machinePoolResource["name"].(string) + if name != "" { + osMap[name] = machinePoolResource + } } nsMap := make(map[string]interface{}) - for _, mp := range ns { + for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - 
nsMap[machinePoolResource["name"].(string)] = machinePoolResource - // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 - if machinePoolResource["name"].(string) != "" { - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolEksHash(machinePoolResource) - - machinePool := toMachinePoolEks(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolEks(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolEksHash(oldMachinePool) { - // TODO - log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolEks(cloudConfigId, machinePool) - // Node Maintenance Actions - err := resourceNodeAction(c, ctx, nsMap[name], c.GetNodeMaintenanceStatusEks, CloudConfig.Kind, cloudConfigId, name) - if err != nil { + name := machinePoolResource["name"].(string) + if name != "" { + nsMap[name] = machinePoolResource + + // Check if this is a new, updated, or unchanged machine pool + if oldMachinePool, exists := osMap[name]; !exists { + // NEW machine pool - CREATE + log.Printf("[DEBUG] Creating new machine pool %s", name) + machinePool := toMachinePoolEks(machinePoolResource) + if err := c.CreateMachinePoolEks(cloudConfigId, machinePool); err != nil { return diag.FromErr(err) } + } else { + // EXISTING machine pool - check if hash changed + oldHash := resourceMachinePoolEksHash(oldMachinePool) + newHash := resourceMachinePoolEksHash(machinePoolResource) + + if oldHash != newHash { + // MODIFIED machine pool - UPDATE + log.Printf("[DEBUG] Updating machine pool %s (hash changed: %d -> %d)", name, oldHash, newHash) + machinePool := toMachinePoolEks(machinePoolResource) + if err := c.UpdateMachinePoolEks(cloudConfigId, machinePool); err != nil { + return diag.FromErr(err) + } + // Node Maintenance Actions + err := resourceNodeAction(c, ctx, machinePoolResource, c.GetNodeMaintenanceStatusEks, CloudConfig.Kind, cloudConfigId, name) + if err != nil { + return diag.FromErr(err) + } + } else { + // UNCHANGED machine pool - no action needed + log.Printf("[DEBUG] Machine pool %s unchanged (hash: %d)", name, oldHash) + } } - if err != nil { - return diag.FromErr(err) - } - - // Processed (if exists) + // Mark as processed delete(osMap, name) + } else { + log.Printf("[DEBUG] WARNING: Machine pool has empty name!") } } - // Deleted old machine pools - for _, mp := range osMap { - machinePool := mp.(map[string]interface{}) - name := machinePool["name"].(string) - log.Printf("Deleted machine pool %s", name) + // REMOVED machine pools - DELETE + for name := range osMap { + log.Printf("[DEBUG] Deleting removed machine pool %s", name) if err := c.DeleteMachinePoolEks(cloudConfigId, name); err != nil { return diag.FromErr(err) } @@ -860,7 +888,7 @@ func toEksCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectro machinePoolConfigs = append(machinePoolConfigs, toMachinePoolEks(cpPool)) } - for _, machinePool := range d.Get("machine_pool").([]interface{}) { + for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { mp := toMachinePoolEks(machinePool) machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -1120,3 +1148,380 @@ func toCloudConfigEks(cloudConfig map[string]interface{}) *models.V1EksCloudClus return clusterConfigEntity } + +func resourceClusterEksResourceV2() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + 
ForceNew: true, + Description: "The name of the cluster.", + }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + Description: "The context of the EKS cluster. Allowed values are `project` or `tenant`. " + + "Default is `project`. " + PROJECT_NAME_NUANCE, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. The `tags` attribute will soon be deprecated. It is recommended to use `tags_map` instead.", + }, + "tags_map": { + Type: schema.TypeMap, + Optional: true, + ConflictsWith: []string{"tags"}, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "A map of tags to be applied to the cluster. tags and tags_map are mutually exclusive — only one should be used at a time", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Default: "", + Description: "The description of the cluster. Default value is empty string.", + }, + "cluster_meta_attribute": { + Type: schema.TypeString, + Optional: true, + Description: "`cluster_meta_attribute` can be used to set additional cluster metadata information, eg `{'nic_name': 'test', 'env': 'stage'}`", + }, + "cluster_profile": schemas.ClusterProfileSchema(), + "apply_setting": { + Type: schema.TypeString, + Optional: true, + Default: "DownloadAndInstall", + ValidateFunc: validation.StringInSlice([]string{"DownloadAndInstall", "DownloadAndInstallLater"}, false), + Description: "The setting to apply the cluster profile. `DownloadAndInstall` will download and install packs in one action. " + + "`DownloadAndInstallLater` will only download artifact and postpone install for later. " + + "Default value is `DownloadAndInstall`.", + }, + "cloud_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The AWS cloud account id to use for this cluster.", + }, + "cloud_config_id": { + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + }, + "review_repave_state": { + Type: schema.TypeString, + Default: "", + Optional: true, + ValidateFunc: validateReviewRepaveValue, + Description: "To authorize the cluster repave, set the value to `Approved` for approval and `\"\"` to decline. Default value is `\"\"`.", + }, + "pause_agent_upgrades": { + Type: schema.TypeString, + Optional: true, + Default: "unlock", + ValidateFunc: validation.StringInSlice([]string{"lock", "unlock"}, false), + Description: "The pause agent upgrades setting allows to control the automatic upgrade of the Palette component and agent for an individual cluster. The default value is `unlock`, meaning upgrades occur automatically. Setting it to `lock` pauses automatic agent upgrades for the cluster.", + }, + "os_patch_on_boot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", + }, + "os_patch_schedule": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateOsPatchSchedule, + Description: "Cron schedule for OS patching. 
This must be in the form of `0 0 * * *`.", + }, + "os_patch_after": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", + }, + "admin_kube_config": { + Type: schema.TypeString, + Computed: true, + Description: "Admin Kube-config for the cluster. This can be used to connect to the cluster using `kubectl`, With admin privilege.", + }, + "cloud_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "The AWS environment configuration settings such as network parameters and encryption parameters that apply to this cluster.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_key_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: "Public SSH key to be used for the cluster nodes.", + }, + "region": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "vpc_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "azs": { + Type: schema.TypeList, + Description: "Mutually exclusive with `az_subnets`. Use for Dynamic provisioning.", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "az_subnets": { + Type: schema.TypeMap, + Description: "Mutually exclusive with `azs`. Use for Static provisioning.", + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "endpoint_access": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"public", "private", "private_and_public"}, false), + Description: "Choose between `private`, `public`, or `private_and_public` to define how communication is established with the endpoint for the managed Kubernetes API server and your cluster. The default value is `public`.", + Default: "public", + }, + "public_access_cidrs": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Description: "List of CIDR blocks that define the allowed public access to the resource. Requests originating from addresses within these CIDR blocks will be permitted to access the resource. All other addresses will be denied access.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "private_access_cidrs": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Description: "List of CIDR blocks that define the allowed private access to the resource. Only requests originating from addresses within these CIDR blocks will be permitted to access the resource.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "encryption_config_arn": { + Type: schema.TypeString, + Description: "The ARN of the KMS encryption key to use for the cluster. 
Refer to the [Enable Secrets Encryption for EKS Cluster](https://docs.spectrocloud.com/clusters/public-cloud/aws/enable-secrets-encryption-kms-key/) for additional guidance.", + ForceNew: true, + Optional: true, + }, + }, + }, + }, + "machine_pool": { + Type: schema.TypeList, + Required: true, + Description: "The machine pool configuration for the cluster.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + //ForceNew: true, + }, + "additional_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "node": schemas.NodeSchema(), + "taints": schemas.ClusterTaintsSchema(), + "disk_size_gb": { + Type: schema.TypeInt, + Required: true, + }, + "count": { + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", + }, + "update_strategy": { + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool.", + }, + "max": { + Type: schema.TypeInt, + Optional: true, + Description: "Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool.", + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + }, + "ami_type": { + Type: schema.TypeString, + Optional: true, + Default: "AL2_x86_64", + Description: "Specifies the type of Amazon Machine Image (AMI) to use for the machine pool. Valid values are [`AL2_x86_64`, `AL2_x86_64_GPU`, `AL2023_x86_64_STANDARD`, `AL2023_x86_64_NEURON` and `AL2023_x86_64_NVIDIA`]. Defaults to `AL2_x86_64`.", + }, + "capacity_type": { + Type: schema.TypeString, + Default: "on-demand", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"on-demand", "spot"}, false), + Description: "Capacity type is an instance type, can be 'on-demand' or 'spot'. Defaults to 'on-demand'.", + }, + "max_price": { + Type: schema.TypeString, + Optional: true, + }, + "azs": { + Type: schema.TypeList, + Optional: true, + Description: "Mutually exclusive with `az_subnets`.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "az_subnets": { + Type: schema.TypeMap, + Optional: true, + Description: "Mutually exclusive with `azs`. 
Use for Static provisioning.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "eks_launch_template": schemas.AwsLaunchTemplate(), + }, + }, + }, + "fargate_profile": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "subnets": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "additional_tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "selector": { + Type: schema.TypeList, + Required: true, + //MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Required: true, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), + "skip_completion": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. Default value is `false`.", + }, + "force_delete": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If set to `true`, the cluster will be force deleted and user has to manually clean up the provisioned cloud resources.", + }, + "force_delete_delay": { + Type: schema.TypeInt, + Optional: true, + Default: 20, + Description: "Delay duration in minutes to before invoking cluster force delete. Default and minimum is 20.", + ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(20)), + }, + }, + } +} + +func resourceClusterEksStateUpgradeV2(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Upgrading cluster custom cloud state from version 2 to 3") + + // Convert machine_pool from TypeList to TypeSet + // Note: We keep the data as a list in rawState and let Terraform's schema processing + // convert it to TypeSet during normal resource loading. This avoids JSON serialization + // issues with schema.Set objects that contain hash functions. + if machinePoolRaw, exists := rawState["machine_pool"]; exists { + if machinePoolList, ok := machinePoolRaw.([]interface{}); ok { + log.Printf("[DEBUG] Keeping machine_pool as list during state upgrade with %d items", len(machinePoolList)) + + // Keep the machine pool data as-is (as a list) + // Terraform will convert it to TypeSet when loading the resource using the schema + rawState["machine_pool"] = machinePoolList + + log.Printf("[DEBUG] Successfully prepared machine_pool for TypeSet conversion") + } else { + log.Printf("[DEBUG] machine_pool is not a list, skipping conversion") + } + } else { + log.Printf("[DEBUG] No machine_pool found in state, skipping conversion") + } + + return rawState, nil +}