Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/data-sources/cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,5 +50,7 @@ resource "local_file" "admin_kube_config" {
### Read-Only

- `admin_kube_config` (String) The admin kubeconfig file for accessing the cluster. This is computed automatically.
- `health` (String) The current health status of the cluster. This is computed automatically.
- `id` (String) The ID of this resource.
- `kube_config` (String) The kubeconfig file for accessing the cluster as a non-admin user. This is computed automatically.
- `state` (String) The current state of the cluster. This is computed automatically.
2 changes: 1 addition & 1 deletion docs/resources/cluster_custom_cloud.md
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ Refer to the [Import section](/docs#import) to learn more.
- `cloud` (String) The cloud provider name.
- `cloud_account_id` (String) The cloud account id to use for this cluster.
- `cloud_config` (Block List, Min: 1, Max: 1) The Cloud environment configuration settings such as network parameters and encryption parameters that apply to this cluster. (see [below for nested schema](#nestedblock--cloud_config))
- `machine_pool` (Block List, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool))
- `machine_pool` (Block Set, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool))
- `name` (String) The name of the cluster.

### Optional
Expand Down
2 changes: 1 addition & 1 deletion docs/resources/cluster_eks.md
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ Refer to the [Import section](/docs#import) to learn more.

- `cloud_account_id` (String) The AWS cloud account id to use for this cluster.
- `cloud_config` (Block List, Min: 1, Max: 1) The AWS environment configuration settings such as network parameters and encryption parameters that apply to this cluster. (see [below for nested schema](#nestedblock--cloud_config))
- `machine_pool` (Block List, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool))
- `machine_pool` (Block Set, Min: 1) The machine pool configuration for the cluster. (see [below for nested schema](#nestedblock--machine_pool))
- `name` (String) The name of the cluster.

### Optional
Expand Down
148 changes: 118 additions & 30 deletions spectrocloud/cluster_common_hash.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,28 @@ func CommonHash(nodePool map[string]interface{}) *bytes.Buffer {
buf.WriteString(HashStringMapList(nodePool["taints"]))
}
if val, ok := nodePool["control_plane"]; ok {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
var boolVal bool
switch v := val.(type) {
case bool:
boolVal = v
case *bool:
if v != nil {
boolVal = *v
}
}
buf.WriteString(fmt.Sprintf("%t-", boolVal))
}
if val, ok := nodePool["control_plane_as_worker"]; ok {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
var boolVal bool
switch v := val.(type) {
case bool:
boolVal = v
case *bool:
if v != nil {
boolVal = *v
}
}
buf.WriteString(fmt.Sprintf("%t-", boolVal))
}
if val, ok := nodePool["name"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
Expand Down Expand Up @@ -66,7 +84,16 @@ func resourceMachinePoolAzureHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
}
if val, ok := m["is_system_node_pool"]; ok {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
var boolVal bool
switch v := val.(type) {
case bool:
boolVal = v
case *bool:
if v != nil {
boolVal = *v
}
}
buf.WriteString(fmt.Sprintf("%t-", boolVal))
}
if val, ok := m["os_type"]; ok && val != "" {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
Expand All @@ -86,7 +113,16 @@ func resourceMachinePoolAksHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%d-", val.(int)))
}
if val, ok := m["is_system_node_pool"]; ok {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
var boolVal bool
switch v := val.(type) {
case bool:
boolVal = v
case *bool:
if v != nil {
boolVal = *v
}
}
buf.WriteString(fmt.Sprintf("%t-", boolVal))
}
if val, ok := m["storage_account_type"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
Expand Down Expand Up @@ -150,31 +186,73 @@ func resourceMachinePoolAwsHash(v interface{}) int {
}

func resourceMachinePoolEksHash(v interface{}) int {
m := v.(map[string]interface{})
buf := CommonHash(m)
nodePool := v.(map[string]interface{})
var buf bytes.Buffer

buf.WriteString(fmt.Sprintf("%d-", m["disk_size_gb"].(int)))
if m["min"] != nil {
buf.WriteString(fmt.Sprintf("%d-", m["min"].(int)))
if val, ok := nodePool["count"]; ok {
buf.WriteString(fmt.Sprintf("%d-", val.(int)))
}
if m["max"] != nil {
buf.WriteString(fmt.Sprintf("%d-", m["max"].(int)))
if val, ok := nodePool["disk_size_gb"]; ok {
buf.WriteString(fmt.Sprintf("%d-", val.(int)))
}
if val, ok := nodePool["instance_type"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
}
if val, ok := nodePool["name"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
}

if _, ok := nodePool["additional_labels"]; ok {
buf.WriteString(HashStringMap(nodePool["additional_labels"]))
}
if val, ok := nodePool["ami_type"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
}
buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["capacity_type"].(string)))
buf.WriteString(fmt.Sprintf("%s-", m["max_price"].(string)))

keys := make([]string, 0, len(m["az_subnets"].(map[string]interface{})))
for k := range m["az_subnets"].(map[string]interface{}) {
keys := make([]string, 0, len(nodePool["az_subnets"].(map[string]interface{})))
for k := range nodePool["az_subnets"].(map[string]interface{}) {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
buf.WriteString(fmt.Sprintf("%s-%s", k, m["az_subnets"].(map[string]interface{})[k].(string)))
buf.WriteString(fmt.Sprintf("%s-%s", k, nodePool["az_subnets"].(map[string]interface{})[k].(string)))
}

if m["eks_launch_template"] != nil {
buf.WriteString(eksLaunchTemplate(m["eks_launch_template"]))
if nodePool["azs"] != nil {
azsList := nodePool["azs"].([]interface{})
azsListStr := make([]string, len(azsList))
for i, v := range azsList {
azsListStr[i] = v.(string)
}
sort.Strings(azsListStr)
azsStr := strings.Join(azsListStr, "-")
buf.WriteString(fmt.Sprintf("%s-", azsStr))
}

if val, ok := nodePool["capacity_type"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
}

if nodePool["min"] != nil {
buf.WriteString(fmt.Sprintf("%d-", nodePool["min"].(int)))
}
if nodePool["max"] != nil {
buf.WriteString(fmt.Sprintf("%d-", nodePool["max"].(int)))
}
if nodePool["max_price"] != nil {
buf.WriteString(fmt.Sprintf("%s-", nodePool["max_price"].(string)))
}
if nodePool["node"] != nil {
buf.WriteString(HashStringMapList(nodePool["node"]))
}
if _, ok := nodePool["taints"]; ok {
buf.WriteString(HashStringMapList(nodePool["taints"]))
}
if nodePool["eks_launch_template"] != nil {
buf.WriteString(eksLaunchTemplate(nodePool["eks_launch_template"]))
}
if val, ok := nodePool["update_strategy"]; ok {
buf.WriteString(fmt.Sprintf("%s-", val.(string)))
}

return int(hash(buf.String()))
Expand Down Expand Up @@ -246,23 +324,33 @@ func resourceMachinePoolVsphereHash(v interface{}) int {
func resourceMachinePoolCustomCloudHash(v interface{}) int {
m := v.(map[string]interface{})
var buf bytes.Buffer
if _, ok := m["name"]; ok {
buf.WriteString(HashStringMap(m["name"]))
}
if _, ok := m["count"]; ok {
buf.WriteString(HashStringMap(m["count"]))
}
if _, ok := m["additional_labels"]; ok {
buf.WriteString(HashStringMap(m["additional_labels"]))
}

if _, ok := m["taints"]; ok {
buf.WriteString(HashStringMapList(m["taints"]))
}
if val, ok := m["control_plane"]; ok {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
var boolVal bool
switch v := val.(type) {
case bool:
boolVal = v
case *bool:
if v != nil {
boolVal = *v
}
}
buf.WriteString(fmt.Sprintf("%t-", boolVal))
}
if val, ok := m["control_plane_as_worker"]; ok {
buf.WriteString(fmt.Sprintf("%t-", val.(bool)))
var boolVal bool
switch v := val.(type) {
case bool:
boolVal = v
case *bool:
if v != nil {
boolVal = *v
}
}
buf.WriteString(fmt.Sprintf("%t-", boolVal))
}
buf.WriteString(fmt.Sprintf("%s-", m["node_pool_config"].(string)))

Expand Down
2 changes: 1 addition & 1 deletion spectrocloud/cluster_common_hash_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,7 @@ func TestResourceMachinePoolEksHash(t *testing.T) {
},
},
},
expected: 456946481,
expected: 706444520,
},
}

Expand Down
25 changes: 25 additions & 0 deletions spectrocloud/common/safe_conversions.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
package common

// SafeUint32 converts an int to uint32, clamping out-of-range values
// instead of overflowing: negative inputs return 0 and values above
// the uint32 maximum return 0xFFFFFFFF. This keeps int -> uint32
// conversions safe from wraparound (the class of issue gosec flags as
// G115) on both 32- and 64-bit platforms.
func SafeUint32(value int) uint32 {
	if value < 0 {
		return 0
	}
	// value is non-negative here, so converting it to uint64 is lossless
	// regardless of the platform's int width. On 32-bit systems int max
	// (2^31-1) is below the uint32 max, so this clamp simply never fires;
	// no architecture-specific branching is needed.
	if uint64(value) > 0xFFFFFFFF {
		return 0xFFFFFFFF
	}
	return uint32(value)
}
25 changes: 25 additions & 0 deletions spectrocloud/data_source_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,16 @@ func dataSourceCluster() *schema.Resource {
Default: false,
Description: "If set to true, the cluster will treated as a virtual cluster. Defaults to `false`.",
},
"state": {
Type: schema.TypeString,
Computed: true,
Description: "The current state of the cluster. This is computed automatically.",
},
"health": {
Type: schema.TypeString,
Computed: true,
Description: "The current health status of the cluster. This is computed automatically.",
},
},
}
}
Expand Down Expand Up @@ -73,6 +83,21 @@ func dataSourceClusterRead(_ context.Context, d *schema.ResourceData, m interfac
if err := d.Set("name", cluster.Metadata.Name); err != nil {
return diag.FromErr(err)
}

// Set cluster state
if cluster.Status != nil && cluster.Status.State != "" {
if err := d.Set("state", cluster.Status.State); err != nil {
return diag.FromErr(err)
}
}

// Set cluster health
clusterSummary, summaryErr := c.GetClusterOverview(cluster.Metadata.UID)
if summaryErr == nil && clusterSummary.Status.Health != nil && clusterSummary.Status.Health.State != "" {
if err := d.Set("health", clusterSummary.Status.Health.State); err != nil {
return diag.FromErr(err)
}
}
}
}
return diags
Expand Down
7 changes: 6 additions & 1 deletion spectrocloud/data_source_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@ package spectrocloud

import (
"context"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/stretchr/testify/assert"
"testing"
)

func TestDataSourceClusterRead(t *testing.T) {
Expand All @@ -23,6 +24,8 @@ func TestDataSourceClusterRead(t *testing.T) {
"virtual": {Type: schema.TypeBool, Optional: true},
"kube_config": {Type: schema.TypeString, Computed: true},
"admin_kube_config": {Type: schema.TypeString, Computed: true},
"state": {Type: schema.TypeString, Computed: true},
"health": {Type: schema.TypeString, Computed: true},
}, map[string]interface{}{
"name": "test-cluster",
"context": "some-context",
Expand All @@ -39,6 +42,8 @@ func TestDataSourceClusterRead(t *testing.T) {
"virtual": {Type: schema.TypeBool, Optional: true},
"kube_config": {Type: schema.TypeString, Computed: true},
"admin_kube_config": {Type: schema.TypeString, Computed: true},
"state": {Type: schema.TypeString, Computed: true},
"health": {Type: schema.TypeString, Computed: true},
}, map[string]interface{}{
"name": "test-cluster",
"context": "some-context",
Expand Down
24 changes: 17 additions & 7 deletions spectrocloud/kubevirt/schema/virtualmachineinstance/domain_spec.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,14 @@ package virtualmachineinstance

import (
"fmt"
"math"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"k8s.io/apimachinery/pkg/api/resource"
kubevirtapiv1 "kubevirt.io/api/core/v1"

"github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/constants"
"github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/common"
"github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/kubevirt/utils"
)

Expand Down Expand Up @@ -289,22 +290,31 @@ func expandCPU(cpu map[string]interface{}) (kubevirtapiv1.CPU, error) {
}

if v, ok := cpu["cores"].(int); ok {
if v < 0 || v > constants.UInt32MaxValue {
if v < 0 {
return result, fmt.Errorf("cores value %d cannot be negative", v)
}
if v > math.MaxInt { // Cap to max representable int on this architecture
return result, fmt.Errorf("cores value %d is out of range for uint32", v)
}
result.Cores = uint32(v)
result.Cores = common.SafeUint32(v)
}
if v, ok := cpu["sockets"].(int); ok {
if v < 0 || v > constants.UInt32MaxValue {
if v < 0 {
return result, fmt.Errorf("sockets value %d cannot be negative", v)
}
if v > math.MaxInt { // Cap to max representable int on this architecture
return result, fmt.Errorf("sockets value %d is out of range for uint32", v)
}
result.Sockets = uint32(v)
result.Sockets = common.SafeUint32(v)
}
if v, ok := cpu["threads"].(int); ok {
if v < 0 || v > constants.UInt32MaxValue {
if v < 0 {
return result, fmt.Errorf("threads value %d cannot be negative", v)
}
if v > math.MaxInt { // Cap to max representable int on this architecture
return result, fmt.Errorf("threads value %d is out of range for uint32", v)
}
result.Threads = uint32(v)
result.Threads = common.SafeUint32(v)
}

return result, nil
Expand Down
Loading