Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 32 additions & 0 deletions docs/resources/cluster_custom_cloud.md
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,18 @@ Required:

- `values` (String) The values of the cloud config. The values are specified in YAML format.

Optional:

- `overrides` (Map of String) Key-value pairs to override specific values in the YAML. Supports template variables, wildcard patterns, field pattern search, document-specific and global overrides.

Template variables: Simple identifiers that replace ${var}, {{var}}, or $var patterns in YAML (e.g., 'cluster_name' replaces ${cluster_name})
Wildcard patterns: Patterns starting with '*' that match field names containing the specified substring (e.g., '*cluster-api-autoscaler-node-group-max-size' matches any field containing 'cluster-api-autoscaler-node-group-max-size')
Field pattern search: Patterns that find and update ALL matching nested fields anywhere in YAML (e.g., 'replicas' updates any 'replicas' field, 'rootVolume.size' updates any 'rootVolume.size' pattern)
Document-specific syntax: 'Kind.path' (e.g., 'Cluster.metadata.labels', 'AWSCluster.spec.region')
Global path syntax: 'path' (e.g., 'metadata.name', 'spec.region')

Processing order: 1) Template substitution, 2) Wildcard patterns, 3) Field pattern search, 4) Path-based overrides. Supports dot notation for nested paths and array indexing with [index]. Values are strings but support JSON syntax for arrays/objects.


<a id="nestedblock--machine_pool"></a>
### Nested Schema for `machine_pool`
Expand All @@ -197,12 +209,32 @@ Optional:

- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`.
- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`.
- `overrides` (Map of String) Key-value pairs to override specific values in the node pool config YAML. Supports template variables, wildcard patterns, field pattern search, document-specific and global overrides.

Template variables: Simple identifiers that replace ${var}, {{var}}, or $var patterns in YAML (e.g., 'node_count' replaces ${node_count})
Wildcard patterns: Patterns starting with '*' that match field names containing the specified substring (e.g., '*cluster-api-autoscaler-node-group-max-size' matches any field containing 'cluster-api-autoscaler-node-group-max-size')
Field pattern search: Patterns that find and update ALL matching nested fields anywhere in YAML (e.g., 'replicas' updates any 'replicas' field, 'rootVolume.size' updates any 'rootVolume.size' pattern)
Document-specific syntax: 'Kind.path' (e.g., 'AWSMachineTemplate.spec.template.spec.instanceType')
Global path syntax: 'path' (e.g., 'metadata.name', 'spec.instanceType')

Processing order: 1) Template substitution, 2) Wildcard patterns, 3) Field pattern search, 4) Path-based overrides. Supports dot notation for nested paths and array indexing with [index]. Values are strings but support JSON syntax for arrays/objects.
- `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints))

Read-Only:

- `count` (Number) Number of nodes in the machine pool. This will be derived from the replica value in the 'node_pool_config'.
- `name` (String) The name of the machine pool. This will be derived from the name value in the `node_pool_config`.

<a id="nestedblock--machine_pool--taints"></a>
### Nested Schema for `machine_pool.taints`

Required:

- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`.
- `key` (String) The key of the taint.
- `value` (String) The value of the taint.



<a id="nestedblock--backup_policy"></a>
### Nested Schema for `backup_policy`
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@

# Import an existing CAPI custom-cloud cluster into Terraform state so the
# spectrocloud_cluster_custom_cloud resource can manage it going forward.
# NOTE(review): the id appears to encode <cluster-uid>:<scope>:<cloud-type> —
# confirm against the provider's import documentation.
import {
to = spectrocloud_cluster_custom_cloud.capi_cluster_taint
id = "687d12fec26d99b846c290a7:tenant:awstko271"
}
14 changes: 14 additions & 0 deletions examples/custom-cloud-import-demo-taint/providers.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Require the Spectro Cloud provider at v0.18.0 or newer from the public registry.
terraform {
required_providers {
spectrocloud = {
version = ">= 0.18.0"
source = "spectrocloud/spectrocloud"
}
}
}

# Provider configuration; all three values come from variables so the example
# can target different Palette endpoints/projects without editing this file.
provider "spectrocloud" {
host = var.sc_host
api_key = var.sc_api_key
project_name = var.sc_project_name
}
13 changes: 13 additions & 0 deletions examples/custom-cloud-import-demo-taint/variables.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Spectro Cloud API endpoint; override for dedicated or on-prem installations.
variable "sc_host" {
description = "Spectro Cloud Endpoint"
default = "api.spectrocloud.com"
}

# API key used to authenticate; intentionally has no default — supply it via
# TF_VAR_sc_api_key or a tfvars file, and never commit it to source control.
variable "sc_api_key" {
description = "Spectro Cloud API key"
}

# Palette project scope for all resources created by this example.
variable "sc_project_name" {
description = "Spectro Cloud Project (e.g: Default)"
default = "Default"
}
5 changes: 5 additions & 0 deletions examples/custom-cloud-import-demo/import_capi_cluster.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@

# Import an existing CAPI custom-cloud cluster into Terraform state so the
# spectrocloud_cluster_custom_cloud resource can manage it going forward.
# NOTE(review): the id appears to encode <cluster-uid>:<scope>:<cloud-type> —
# confirm against the provider's import documentation.
import {
to = spectrocloud_cluster_custom_cloud.capi_cluster
id = "68785a9ef75cadff7aaa6d7b:tenant:awstko271"
}
14 changes: 14 additions & 0 deletions examples/custom-cloud-import-demo/providers.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Require the Spectro Cloud provider at v0.18.0 or newer from the public registry.
terraform {
required_providers {
spectrocloud = {
version = ">= 0.18.0"
source = "spectrocloud/spectrocloud"
}
}
}

# Provider configuration; all three values come from variables so the example
# can target different Palette endpoints/projects without editing this file.
provider "spectrocloud" {
host = var.sc_host
api_key = var.sc_api_key
project_name = var.sc_project_name
}
13 changes: 13 additions & 0 deletions examples/custom-cloud-import-demo/variables.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Spectro Cloud API endpoint; override for dedicated or on-prem installations.
variable "sc_host" {
description = "Spectro Cloud Endpoint"
default = "api.spectrocloud.com"
}

# API key used to authenticate; intentionally has no default — supply it via
# TF_VAR_sc_api_key or a tfvars file, and never commit it to source control.
variable "sc_api_key" {
description = "Spectro Cloud API key"
}

# Palette project scope for all resources created by this example.
variable "sc_project_name" {
description = "Spectro Cloud Project (e.g: Default)"
default = "Default"
}
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ require (
github.com/robfig/cron v1.2.0
github.com/spectrocloud/palette-sdk-go v0.0.0-20250708143007-797b352a2da2
github.com/stretchr/testify v1.10.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools v2.2.0+incompatible
k8s.io/api v0.23.5
k8s.io/apimachinery v0.23.5
Expand Down Expand Up @@ -118,7 +119,6 @@ require (
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.23.5 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
Expand Down
139 changes: 139 additions & 0 deletions spectrocloud/cluster_common_hash.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"strings"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"gopkg.in/yaml.v3"
)

func CommonHash(nodePool map[string]interface{}) *bytes.Buffer {
Expand Down Expand Up @@ -263,6 +264,12 @@ func resourceMachinePoolCustomCloudHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
}
buf.WriteString(fmt.Sprintf("%s-", m["node_pool_config"].(string)))

// Include overrides in hash calculation for change detection
if overrides, ok := m["overrides"]; ok {
buf.WriteString(HashStringMap(overrides))
}

return int(hash(buf.String()))
}

Expand Down Expand Up @@ -455,3 +462,135 @@ func hash(s string) uint32 {
_, _ = h.Write([]byte(s))
return h.Sum32()
}

// YamlContentHash returns a hex-encoded FNV-1a 64-bit hash of the YAML
// content's canonical form, so two documents that differ only in formatting
// or map-key order hash to the same value.
func YamlContentHash(yamlContent string) string {
	hasher := fnv.New64a()
	// fnv hashers never return an error from Write.
	_, _ = hasher.Write([]byte(yamlContentToCanonicalString(yamlContent)))
	return fmt.Sprintf("%x", hasher.Sum64())
}

// yamlContentToCanonicalString converts (possibly multi-document) YAML content
// to a deterministic string used for semantic hashing.
//
// Documents are separated only on lines consisting solely of "---" (the YAML
// document separator), not on every "---" substring, so scalar values that
// happen to contain "---" (e.g. "foo---bar") are no longer split into bogus
// documents. Each document is parsed and rendered via toCanonicalString with
// sorted map keys; documents that fail to parse are kept verbatim (trimmed) so
// their text changes still affect the result. Documents are joined with "|||".
func yamlContentToCanonicalString(yamlContent string) string {
	if strings.TrimSpace(yamlContent) == "" {
		return ""
	}

	// Split on document separator lines only ("---" alone on a line).
	var documents []string
	var current []string
	for _, line := range strings.Split(yamlContent, "\n") {
		if strings.TrimSpace(line) == "---" {
			documents = append(documents, strings.Join(current, "\n"))
			current = nil
			continue
		}
		current = append(current, line)
	}
	documents = append(documents, strings.Join(current, "\n"))

	var canonicalDocs []string
	for _, doc := range documents {
		doc = strings.TrimSpace(doc)
		if doc == "" {
			continue
		}

		// Parse YAML document.
		var yamlData interface{}
		if err := yaml.Unmarshal([]byte(doc), &yamlData); err != nil {
			// If parsing fails, use original doc for canonical form so the
			// hash still reflects its raw content.
			canonicalDocs = append(canonicalDocs, doc)
			continue
		}

		// Convert to canonical string representation.
		canonicalDocs = append(canonicalDocs, toCanonicalString(yamlData))
	}

	if len(canonicalDocs) == 0 {
		return ""
	}

	return strings.Join(canonicalDocs, "|||") // Use ||| as document separator
}

// toCanonicalString renders a parsed YAML value as a deterministic string:
// maps become "{k:v,...}" with keys sorted, sequences become "[v,...]",
// strings are wrapped in double quotes, nil becomes "null", and all other
// scalars use their default %v formatting.
func toCanonicalString(data interface{}) string {
	switch node := data.(type) {
	case map[string]interface{}:
		// Emit entries in sorted key order so output is deterministic.
		names := make([]string, 0, len(node))
		for name := range node {
			names = append(names, name)
		}
		sort.Strings(names)

		var sb strings.Builder
		sb.WriteString("{")
		for i, name := range names {
			if i > 0 {
				sb.WriteString(",")
			}
			sb.WriteString(name)
			sb.WriteString(":")
			sb.WriteString(toCanonicalString(node[name]))
		}
		sb.WriteString("}")
		return sb.String()

	case map[interface{}]interface{}:
		// Legacy YAML decoders produce interface-keyed maps; keep only the
		// string keys (matching prior behavior) and recurse.
		converted := make(map[string]interface{}, len(node))
		for rawKey, val := range node {
			if keyStr, ok := rawKey.(string); ok {
				converted[keyStr] = val
			}
		}
		return toCanonicalString(converted)

	case []interface{}:
		elems := make([]string, len(node))
		for i, item := range node {
			elems[i] = toCanonicalString(item)
		}
		return "[" + strings.Join(elems, ",") + "]"

	case string:
		return "\"" + node + "\""
	case nil:
		return "null"
	default:
		// Covers ints, floats, bools and anything else via %v.
		return fmt.Sprintf("%v", node)
	}
}

// NormalizeYamlContent parses (possibly multi-document) YAML and re-serializes
// it in a consistent format, for use as a schema StateFunc.
//
// Documents are separated only on lines consisting solely of "---" (the YAML
// document separator), not on every "---" substring, so scalar values that
// happen to contain "---" (e.g. "foo---bar") are no longer split into bogus
// documents. Documents that fail to parse or re-marshal are kept verbatim
// (trimmed). Normalized documents are re-joined with "\n---\n".
func NormalizeYamlContent(yamlContent string) string {
	if strings.TrimSpace(yamlContent) == "" {
		return ""
	}

	// Split on document separator lines only ("---" alone on a line).
	var documents []string
	var current []string
	for _, line := range strings.Split(yamlContent, "\n") {
		if strings.TrimSpace(line) == "---" {
			documents = append(documents, strings.Join(current, "\n"))
			current = nil
			continue
		}
		current = append(current, line)
	}
	documents = append(documents, strings.Join(current, "\n"))

	var normalizedDocs []string
	for _, doc := range documents {
		doc = strings.TrimSpace(doc)
		if doc == "" {
			continue
		}

		// Parse YAML document.
		var yamlData interface{}
		if err := yaml.Unmarshal([]byte(doc), &yamlData); err != nil {
			// If parsing fails, return original (trimmed).
			normalizedDocs = append(normalizedDocs, doc)
			continue
		}

		// Re-serialize in consistent format.
		normalizedYaml, err := yaml.Marshal(yamlData)
		if err != nil {
			// If marshaling fails, return original (trimmed).
			normalizedDocs = append(normalizedDocs, doc)
			continue
		}

		normalizedDocs = append(normalizedDocs, strings.TrimSpace(string(normalizedYaml)))
	}

	if len(normalizedDocs) == 0 {
		return ""
	}

	return strings.Join(normalizedDocs, "\n---\n")
}
5 changes: 2 additions & 3 deletions spectrocloud/cluster_common_hash_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -619,8 +619,7 @@ func TestResourceMachinePoolCustomCloudHash(t *testing.T) {
"name": "custom-cloud",
"count": 3,
"control_plane": true,
"control_plane_as_worker": false, //comment this for fail test
"additional_labels": map[string]string{"env": "prod"},
"control_plane_as_worker": false,
"taints": []interface{}{"key1=value1", "key2=value2"},
"node_pool_config": "standard",
},
Expand All @@ -631,7 +630,7 @@ func TestResourceMachinePoolCustomCloudHash(t *testing.T) {
input: map[string]interface{}{
"name": "test-pool",
"count": 3,
"node_pool_config": "standard", //comment this for fail test
"node_pool_config": "standard",
},
expected: 1525978111,
},
Expand Down
Loading