
Commit a9d784a

PLT-795: Added import support for GKE & GCP clusters. (#459)
* PLT-795: Added import support for GCP (IaaS) type clusters.
* Added validation.
* Added cluster profile support in import.
* Added cluster profile import support in GCP.
1 parent a9a3c6d commit a9d784a

8 files changed: +408 −1 lines changed

spectrocloud/cluster_common.go

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@ var (
 		"spectrocloud_cluster_openstack": "openstack",
 		"spectrocloud_cluster_tke":       "tke",
 		"spectrocloud_cluster_vsphere":   "vsphere",
+		"spectrocloud_cluster_gke":       "gke",
 	}
 )
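
The new "spectrocloud_cluster_gke" entry extends the map that ties each Terraform resource type to its Palette cloud type. ValidateCloudType, which the GCP and GKE read functions call later in this commit, presumably consults this map; its implementation is not part of this diff. A minimal sketch of that check, with the map name, the parameter list, and the cluster.Spec.CloudType lookup all assumed rather than taken from the source:

func validateCloudTypeSketch(resourceName string, cluster *models.V1SpectroCluster) error {
	// Hypothetical map name; the real variable is declared in the var block above.
	expected, ok := resourceTypeToCloudType[resourceName]
	if !ok {
		return fmt.Errorf("unknown resource type %q", resourceName)
	}
	// Assumes the cluster spec exposes the provisioned cloud type as a plain string.
	if cluster.Spec.CloudType != expected {
		return fmt.Errorf("resource %q cannot import a %q cluster; use the matching cluster resource", resourceName, cluster.Spec.CloudType)
	}
	return nil
}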

spectrocloud/cluster_common_profiles.go

Lines changed: 18 additions & 0 deletions
@@ -172,3 +172,21 @@ func updateProfiles(c *client.V1Client, d *schema.ResourceData) error {
 
 	return nil
 }
+
+func flattenClusterProfileForImport(c *client.V1Client, d *schema.ResourceData) ([]interface{}, error) {
+	clusterContext := "project"
+	if v, ok := d.GetOk("context"); ok {
+		clusterContext = v.(string)
+	}
+	clusterProfiles := make([]interface{}, 0)
+	cluster, err := c.GetCluster(clusterContext, d.Id())
+	if err != nil {
+		return clusterProfiles, err
+	}
+	for _, profileTemplate := range cluster.Spec.ClusterProfileTemplates {
+		profile := make(map[string]interface{})
+		profile["id"] = profileTemplate.UID
+		clusterProfiles = append(clusterProfiles, profile)
+	}
+	return clusterProfiles, nil
+}
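
For reference, a hedged sketch of the value this helper produces for a cluster with two attached profiles (the UIDs are made up; only the profile ID is carried over on import, so any pack-level detail in the cluster_profile block is presumably filled in by a later read or plan):

clusterProfiles := []interface{}{
	map[string]interface{}{"id": "profile-uid-1"},
	map[string]interface{}{"id": "profile-uid-2"},
}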

spectrocloud/resource_cluster_gcp.go

Lines changed: 40 additions & 1 deletion
@@ -2,6 +2,7 @@ package spectrocloud
 
 import (
 	"context"
+	"github.com/spectrocloud/gomi/pkg/ptr"
 	"log"
 	"time"
 
@@ -22,7 +23,10 @@ func resourceClusterGcp() *schema.Resource {
 		ReadContext:   resourceClusterGcpRead,
 		UpdateContext: resourceClusterGcpUpdate,
 		DeleteContext: resourceClusterDelete,
-		Description:   "Resource for managing GCP clusters in Spectro Cloud through Palette.",
+		Importer: &schema.ResourceImporter{
+			StateContext: resourceClusterGcpImport,
+		},
+		Description: "Resource for managing GCP clusters in Spectro Cloud through Palette.",
 
 		Timeouts: &schema.ResourceTimeout{
 			Create: schema.DefaultTimeout(60 * time.Minute),
@@ -294,6 +298,17 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa
 		return diags
 	}
 
+	configUID := cluster.Spec.CloudConfigRef.UID
+	if err := d.Set("cloud_config_id", configUID); err != nil {
+		return diag.FromErr(err)
+	}
+
+	// verify cluster type
+	err = ValidateCloudType("spectrocloud_cluster_gcp", cluster)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
 	diagnostics, done := readCommonFields(c, d, cluster)
 	if done {
 		return diagnostics
@@ -310,6 +325,12 @@ func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V
 	if config, err := c.GetCloudConfigGcp(configUID, ClusterContext); err != nil {
 		return diag.FromErr(err)
 	} else {
+		if err := d.Set("cloud_account_id", config.Spec.CloudAccountRef.UID); err != nil {
+			return diag.FromErr(err)
+		}
+		if err := d.Set("cloud_config", flattenClusterConfigsGcp(config)); err != nil {
+			return diag.FromErr(err)
+		}
 		mp := flattenMachinePoolConfigsGcp(config.Spec.MachinePoolConfig)
 		mp, err := flattenNodeMaintenanceStatus(c, d, c.GetNodeStatusMapGcp, mp, configUID, ClusterContext)
 		if err != nil {
@@ -322,6 +343,24 @@ func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V
 	return diag.Diagnostics{}
 }
 
+func flattenClusterConfigsGcp(config *models.V1GcpCloudConfig) []interface{} {
+	if config == nil || config.Spec == nil || config.Spec.ClusterConfig == nil {
+		return make([]interface{}, 0)
+	}
+	m := make(map[string]interface{})
+
+	if config.Spec.ClusterConfig.Project != nil {
+		m["project"] = config.Spec.ClusterConfig.Project
+	}
+	if config.Spec.ClusterConfig.Network != "" {
+		m["network"] = config.Spec.ClusterConfig.Network
+	}
+	if ptr.String(config.Spec.ClusterConfig.Region) != "" {
+		m["region"] = ptr.String(config.Spec.ClusterConfig.Region)
+	}
+	return []interface{}{m}
+}
+
 func flattenMachinePoolConfigsGcp(machinePools []*models.V1GcpMachinePoolConfig) []interface{} {
 
 	if machinePools == nil {
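
The flattener above leans on github.com/spectrocloud/gomi/pkg/ptr, which is imported at the top of the file but whose implementation is not shown in this diff. The usage suggests ptr.String is a nil-safe string dereference; a sketch of that assumed behavior:

func stringOrEmpty(s *string) string {
	// Returns the pointed-to value, or "" when the pointer is nil,
	// so a missing region simply skips the "region" key above.
	if s == nil {
		return ""
	}
	return *s
}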

spectrocloud/resource_cluster_gcp_import.go

Lines changed: 34 additions & 0 deletions

@@ -0,0 +1,34 @@
package spectrocloud

import (
	"context"
	"fmt"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/spectrocloud/palette-sdk-go/client"
)

func resourceClusterGcpImport(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	c := m.(*client.V1Client)
	err := GetCommonCluster(d, c)
	if err != nil {
		return nil, err
	}

	diags := resourceClusterGcpRead(ctx, d, m)
	if diags.HasError() {
		return nil, fmt.Errorf("could not read cluster for import: %v", diags)
	}

	clusterProfiles, err := flattenClusterProfileForImport(c, d)
	if err != nil {
		return nil, err
	}
	if err := d.Set("cluster_profile", clusterProfiles); err != nil {
		return nil, fmt.Errorf("could not set cluster profile during import: %v", err)
	}

	// Return the resource data. In most cases, this method is only used to
	// import one resource at a time, so you should return the resource data
	// in a slice with a single element.
	return []*schema.ResourceData{d}, nil
}
spectrocloud/resource_cluster_gcp_test.go

Lines changed: 217 additions & 0 deletions

@@ -0,0 +1,217 @@
package spectrocloud

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/spectrocloud/gomi/pkg/ptr"
	"github.com/spectrocloud/hapi/models"
	"github.com/spectrocloud/terraform-provider-spectrocloud/types"
	"github.com/stretchr/testify/assert"
	"testing"
)

func TestToMachinePoolGcp(t *testing.T) {
	tests := []struct {
		name           string
		input          map[string]interface{}
		expectedOutput *models.V1GcpMachinePoolConfigEntity
		expectError    bool
	}{
		{
			name: "Control Plane",
			input: map[string]interface{}{
				"control_plane":           true,
				"control_plane_as_worker": true,
				"azs":                     schema.NewSet(schema.HashString, []interface{}{"us-central1-a"}),
				"instance_type":           "n1-standard-1",
				"disk_size_gb":            50,
				"name":                    "example-name",
				"count":                   3,
				"node_repave_interval":    0,
			},
			expectedOutput: &models.V1GcpMachinePoolConfigEntity{
				CloudConfig: &models.V1GcpMachinePoolCloudConfigEntity{
					Azs:            []string{"us-central1-a"},
					InstanceType:   types.Ptr("n1-standard-1"),
					RootDeviceSize: int64(50),
				},
				PoolConfig: &models.V1MachinePoolConfigEntity{
					AdditionalLabels: map[string]string{},
					Taints:           nil,
					IsControlPlane:   true,
					Labels:           []string{"master"},
					Name:             types.Ptr("example-name"),
					Size:             types.Ptr(int32(3)),
					UpdateStrategy: &models.V1UpdateStrategy{
						Type: "RollingUpdateScaleOut",
					},
					UseControlPlaneAsWorker: true,
				},
			},
			expectError: false,
		},
		{
			name: "Node Repave Interval Error",
			input: map[string]interface{}{
				"control_plane":           true,
				"control_plane_as_worker": false,
				"azs":                     schema.NewSet(schema.HashString, []interface{}{"us-central1-a"}),
				"instance_type":           "n1-standard-2",
				"disk_size_gb":            100,
				"name":                    "example-name-2",
				"count":                   2,
				"node_repave_interval":    -1,
			},
			expectedOutput: &models.V1GcpMachinePoolConfigEntity{
				CloudConfig: &models.V1GcpMachinePoolCloudConfigEntity{
					Azs:            []string{"us-central1-a"},
					InstanceType:   types.Ptr("n1-standard-2"),
					RootDeviceSize: int64(100),
				},
				PoolConfig: &models.V1MachinePoolConfigEntity{
					AdditionalLabels: map[string]string{"example": "label"},
					Taints:           []*models.V1Taint{},
					IsControlPlane:   true,
					Labels:           []string{"master"},
					Name:             types.Ptr("example-name-2"),
					Size:             types.Ptr(int32(2)),
					UpdateStrategy: &models.V1UpdateStrategy{
						Type: "RollingUpdate",
					},
					UseControlPlaneAsWorker: false,
				},
			},
			expectError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			output, err := toMachinePoolGcp(tt.input)

			if tt.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expectedOutput, output)
			}
		})
	}
}

func TestFlattenMachinePoolConfigsGcp(t *testing.T) {
	tests := []struct {
		name           string
		input          []*models.V1GcpMachinePoolConfig
		expectedOutput []interface{}
	}{
		{
			name: "Single Machine Pool",
			input: []*models.V1GcpMachinePoolConfig{
				{
					AdditionalLabels:        map[string]string{"label1": "value1", "label2": "value2"},
					Taints:                  []*models.V1Taint{{Key: "taint1", Value: "value1", Effect: "NoSchedule"}},
					IsControlPlane:          ptr.BoolPtr(true),
					UseControlPlaneAsWorker: true,
					Name:                    "machine-pool-1",
					Size:                    int32(3),
					UpdateStrategy:          &models.V1UpdateStrategy{Type: "RollingUpdate"},
					InstanceType:            types.Ptr("n1-standard-4"),
					RootDeviceSize:          int64(100),
					Azs:                     []string{"us-west1-a", "us-west1-b"},
					NodeRepaveInterval:      0,
				},
			},
			expectedOutput: []interface{}{
				map[string]interface{}{
					"additional_labels": map[string]string{
						"label1": "value1",
						"label2": "value2",
					},
					"taints": []interface{}{
						map[string]interface{}{
							"key":    "taint1",
							"value":  "value1",
							"effect": "NoSchedule",
						},
					},
					"control_plane":           true,
					"control_plane_as_worker": true,
					"name":                    "machine-pool-1",
					"count":                   3,
					"update_strategy":         "RollingUpdate",
					"instance_type":           "n1-standard-4",
					"disk_size_gb":            100,
					"azs":                     []string{"us-west1-a", "us-west1-b"},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			output := flattenMachinePoolConfigsGcp(tt.input)
			assert.Equal(t, tt.expectedOutput, output)
		})
	}
}

func TestFlattenClusterConfigsGcp(t *testing.T) {
	tests := []struct {
		name           string
		input          *models.V1GcpCloudConfig
		expectedOutput []interface{}
	}{
		{
			name: "Valid Cloud Config",
			input: &models.V1GcpCloudConfig{
				Spec: &models.V1GcpCloudConfigSpec{
					ClusterConfig: &models.V1GcpClusterConfig{
						Project: ptr.StringPtr("my-project"),
						Network: "my-network",
						Region:  ptr.StringPtr("us-west1"),
					},
				},
			},
			expectedOutput: []interface{}{
				map[string]interface{}{
					"project": ptr.StringPtr("my-project"),
					"network": "my-network",
					"region":  "us-west1",
				},
			},
		},
		{
			name:           "Nil Cloud Config",
			input:          nil,
			expectedOutput: []interface{}{},
		},
		{
			name:           "Empty Cluster Config",
			input:          &models.V1GcpCloudConfig{},
			expectedOutput: []interface{}{},
		},
		{
			name:           "Empty Cluster Config Spec",
			input:          &models.V1GcpCloudConfig{Spec: &models.V1GcpCloudConfigSpec{}},
			expectedOutput: []interface{}{},
		},
		{
			name: "Missing Fields in Cluster Config",
			input: &models.V1GcpCloudConfig{
				Spec: &models.V1GcpCloudConfigSpec{
					ClusterConfig: &models.V1GcpClusterConfig{},
				},
			},
			expectedOutput: []interface{}{
				map[string]interface{}{},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			output := flattenClusterConfigsGcp(tt.input)
			assert.Equal(t, tt.expectedOutput, output)
		})
	}
}

spectrocloud/resource_cluster_gke.go

Lines changed: 6 additions & 0 deletions
@@ -272,6 +272,12 @@ func resourceClusterGkeRead(ctx context.Context, d *schema.ResourceData, m inter
 		return diag.FromErr(err)
 	}
 
+	// verify cluster type
+	err = ValidateCloudType("spectrocloud_cluster_gke", cluster)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
 	diagnostics, done := readCommonFields(c, d, cluster)
 	if done {
 		return diagnostics

spectrocloud/resource_cluster_gke_import.go

Lines changed: 8 additions & 0 deletions
@@ -21,6 +21,14 @@ func resourceClusterGkeImport(ctx context.Context, d *schema.ResourceData, m int
 		return nil, fmt.Errorf("could not read cluster for import: %v", diags)
 	}
 
+	clusterProfiles, err := flattenClusterProfileForImport(c, d)
+	if err != nil {
+		return nil, err
+	}
+	if err := d.Set("cluster_profile", clusterProfiles); err != nil {
+		return nil, fmt.Errorf("could not set cluster profile during import: %v", err)
+	}
+
 	// Return the resource data. In most cases, this method is only used to
 	// import one resource at a time, so you should return the resource data
 	// in a slice with a single element.
